Import prebuilt clang toolchain for Linux.
diff --git a/linux-x64/clang/include/llvm/ADT/APFloat.h b/linux-x64/clang/include/llvm/ADT/APFloat.h
new file mode 100644
index 0000000..6c0b6ae
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/APFloat.h
@@ -0,0 +1,1249 @@
+//===- llvm/ADT/APFloat.h - Arbitrary Precision Floating Point ---*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief
+/// This file declares a class to represent arbitrary precision floating point
+/// values and provide a variety of arithmetic operations on them.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APFLOAT_H
+#define LLVM_ADT_APFLOAT_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <memory>
+
+#define APFLOAT_DISPATCH_ON_SEMANTICS(METHOD_CALL)                             \
+  do {                                                                         \
+    if (usesLayout<IEEEFloat>(getSemantics()))                                 \
+      return U.IEEE.METHOD_CALL;                                               \
+    if (usesLayout<DoubleAPFloat>(getSemantics()))                             \
+      return U.Double.METHOD_CALL;                                             \
+    llvm_unreachable("Unexpected semantics");                                  \
+  } while (false)
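+
+// Illustrative expansion (a sketch): a member defined as
+//   bool isInteger() const { APFLOAT_DISPATCH_ON_SEMANTICS(isInteger()); }
+// forwards to U.IEEE.isInteger() or U.Double.isInteger(), depending on which
+// layout the value's semantics select; llvm_unreachable guards the fallthrough.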
+
+namespace llvm {
+
+struct fltSemantics;
+class APSInt;
+class StringRef;
+class APFloat;
+class raw_ostream;
+
+template <typename T> class SmallVectorImpl;
+
+/// Enum that represents what fraction of the LSB the truncated bits of an fp
+/// number represent.
+///
+/// This essentially combines the roles of guard and sticky bits.
+enum lostFraction { // Example of truncated bits:
+  lfExactlyZero,    // 000000
+  lfLessThanHalf,   // 0xxxxx  x's not all zero
+  lfExactlyHalf,    // 100000
+  lfMoreThanHalf    // 1xxxxx  x's not all zero
+};
+
+/// A self-contained host- and target-independent arbitrary-precision
+/// floating-point software implementation.
+///
+/// APFloat uses bignum integer arithmetic as provided by static functions in
+/// the APInt class.  The library will work with bignum integers whose parts are
+/// any unsigned type at least 16 bits wide, but 64 bits is recommended.
+///
+/// Written for clarity rather than speed, in particular with a view to use in
+/// the front-end of a cross compiler so that target arithmetic can be correctly
+/// performed on the host.  Performance should nonetheless be reasonable,
+/// particularly for its intended use.  It may be useful as a base
+/// implementation for a run-time library during development of a faster
+/// target-specific one.
+///
+/// All 5 rounding modes in the IEEE-754R draft are handled correctly for all
+/// implemented operations.  Currently implemented operations are add, subtract,
+/// multiply, divide, fused-multiply-add, conversion-to-float,
+/// conversion-to-integer and conversion-from-integer.  New rounding modes
+/// (e.g. away from zero) can be added with three or four lines of code.
+///
+/// Four formats are built-in: IEEE single precision, double precision,
+/// quadruple precision, and x87 80-bit extended double (when operating with
+/// full extended precision).  Adding a new format that obeys IEEE semantics
+/// only requires adding two lines of code: a declaration and definition of the
+/// format.
+///
+/// All operations return the status of that operation as an exception bit-mask,
+/// so multiple operations can be done consecutively with their results or-ed
+/// together.  The returned status can be useful for compiler diagnostics; e.g.,
+/// inexact, underflow and overflow can be easily diagnosed on constant folding,
+/// and compiler optimizers can determine what exceptions would be raised by
+/// folding operations and optimize, or perhaps not optimize, accordingly.
+///
+/// At present, underflow tininess is detected after rounding; it should be
+/// straightforward to add support for the before-rounding case too.
+///
+/// The library reads hexadecimal floating point numbers as per C99, and
+/// correctly rounds if necessary according to the specified rounding mode.
+/// Syntax is required to have been validated by the caller.  It also converts
+/// floating point numbers to hexadecimal text as per the C99 %a and %A
+/// conversions.  The output precision (or alternatively the natural minimal
+/// precision) can be specified; if the requested precision is less than the
+/// natural precision the output is correctly rounded for the specified rounding
+/// mode.
+///
+/// It also reads decimal floating point numbers and correctly rounds according
+/// to the specified rounding mode.
+///
+/// Conversion to decimal text is not currently implemented.
+///
+/// Non-zero finite numbers are represented internally as a sign bit, a 16-bit
+/// signed exponent, and the significand as an array of integer parts.  After
+/// normalization of a number of precision P the exponent is within the range of
+/// the format, and if the number is not denormal the P-th bit of the
+/// significand is set as an explicit integer bit.  For denormals the most
+/// significant bit is shifted right so that the exponent is maintained at the
+/// format's minimum, so that the smallest denormal has just the least
+/// significant bit of the significand set.  The sign of zeroes and infinities
+/// is significant; the exponent and significand of such numbers is not stored,
+/// but has a known implicit (deterministic) value: 0 for the significands, 0
+/// for zero exponent, all 1 bits for infinity exponent.  For NaNs the sign and
+/// significand are deterministic, although not really meaningful, and preserved
+/// in non-conversion operations.  The exponent is implicitly all 1 bits.
+///
+/// APFloat does not provide any exception handling beyond default exception
+/// handling. Following the "should" clause of IEEE-754R 2008 6.2.1, we encode
+/// signaling NaNs by setting the first bit of the trailing significand to 0.
+///
+/// TODO
+/// ====
+///
+/// Some features that may or may not be worth adding:
+///
+/// Binary to decimal conversion (hard).
+///
+/// Optional ability to detect underflow tininess before rounding.
+///
+/// New formats: x87 in single and double precision mode (IEEE apart from
+/// extended exponent range) (hard).
+///
+/// New operations: sqrt, IEEE remainder, C90 fmod, nexttoward.
+///
+
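+// Illustrative usage (a sketch; exact values depend on the chosen semantics):
+//
+//   APFloat X(APFloat::IEEEdouble(), "0x1.8p1");   // hex-float syntax, C99
+//   APFloat Y(2.0);
+//   APFloat::opStatus S = X.add(Y, APFloat::rmNearestTiesToEven);
+//   if (S & APFloat::opInexact) {
+//     // rounding occurred; useful for constant-folding diagnostics
+//   }
+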
+// These are the common type definitions shared by APFloat and its internal
+// implementation classes. This struct should not define any non-static data
+// members.
+struct APFloatBase {
+  typedef APInt::WordType integerPart;
+  static const unsigned integerPartWidth = APInt::APINT_BITS_PER_WORD;
+
+  /// A signed type to represent a floating point number's unbiased exponent.
+  typedef signed short ExponentType;
+
+  /// \name Floating Point Semantics.
+  /// @{
+
+  static const fltSemantics &IEEEhalf() LLVM_READNONE;
+  static const fltSemantics &IEEEsingle() LLVM_READNONE;
+  static const fltSemantics &IEEEdouble() LLVM_READNONE;
+  static const fltSemantics &IEEEquad() LLVM_READNONE;
+  static const fltSemantics &PPCDoubleDouble() LLVM_READNONE;
+  static const fltSemantics &x87DoubleExtended() LLVM_READNONE;
+
+  /// A pseudo fltSemantics used to construct APFloats that cannot conflict
+  /// with anything real.
+  static const fltSemantics &Bogus() LLVM_READNONE;
+
+  /// @}
+
+  /// IEEE-754R 5.11: Floating Point Comparison Relations.
+  enum cmpResult {
+    cmpLessThan,
+    cmpEqual,
+    cmpGreaterThan,
+    cmpUnordered
+  };
+
+  /// IEEE-754R 4.3: Rounding-direction attributes.
+  enum roundingMode {
+    rmNearestTiesToEven,
+    rmTowardPositive,
+    rmTowardNegative,
+    rmTowardZero,
+    rmNearestTiesToAway
+  };
+
+  /// IEEE-754R 7: Default exception handling.
+  ///
+  /// opUnderflow or opOverflow are always returned or-ed with opInexact.
+  enum opStatus {
+    opOK = 0x00,
+    opInvalidOp = 0x01,
+    opDivByZero = 0x02,
+    opOverflow = 0x04,
+    opUnderflow = 0x08,
+    opInexact = 0x10
+  };
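+
+  // Illustrative sketch: statuses from consecutive operations can be or-ed
+  // together and inspected once at the end (Val, A, B and RM are hypothetical):
+  //   unsigned S = Val.multiply(A, RM);
+  //   S |= Val.add(B, RM);
+  //   bool Overflowed = (S & opOverflow) != 0;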
+
+  /// Category of internally-represented number.
+  enum fltCategory {
+    fcInfinity,
+    fcNaN,
+    fcNormal,
+    fcZero
+  };
+
+  /// Convenience enum used to construct an uninitialized APFloat.
+  enum uninitializedTag {
+    uninitialized
+  };
+
+  /// Enumeration of \c ilogb error results.
+  enum IlogbErrorKinds {
+    IEK_Zero = INT_MIN + 1,
+    IEK_NaN = INT_MIN,
+    IEK_Inf = INT_MAX
+  };
+
+  static unsigned int semanticsPrecision(const fltSemantics &);
+  static ExponentType semanticsMinExponent(const fltSemantics &);
+  static ExponentType semanticsMaxExponent(const fltSemantics &);
+  static unsigned int semanticsSizeInBits(const fltSemantics &);
+
+  /// Returns the size of the floating point number (in bits) in the given
+  /// semantics.
+  static unsigned getSizeInBits(const fltSemantics &Sem);
+};
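+
+// For orientation (values implied by the IEEE formats themselves):
+//   semanticsPrecision(IEEEsingle()) == 24,  getSizeInBits(IEEEsingle()) == 32
+//   semanticsPrecision(IEEEdouble()) == 53,  getSizeInBits(IEEEdouble()) == 64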
+
+namespace detail {
+
+class IEEEFloat final : public APFloatBase {
+public:
+  /// \name Constructors
+  /// @{
+
+  IEEEFloat(const fltSemantics &); // Default construct to 0.0
+  IEEEFloat(const fltSemantics &, integerPart);
+  IEEEFloat(const fltSemantics &, uninitializedTag);
+  IEEEFloat(const fltSemantics &, const APInt &);
+  explicit IEEEFloat(double d);
+  explicit IEEEFloat(float f);
+  IEEEFloat(const IEEEFloat &);
+  IEEEFloat(IEEEFloat &&);
+  ~IEEEFloat();
+
+  /// @}
+
+  /// Returns whether this instance allocated memory.
+  bool needsCleanup() const { return partCount() > 1; }
+
+  /// \name Convenience "constructors"
+  /// @{
+
+  /// @}
+
+  /// \name Arithmetic
+  /// @{
+
+  opStatus add(const IEEEFloat &, roundingMode);
+  opStatus subtract(const IEEEFloat &, roundingMode);
+  opStatus multiply(const IEEEFloat &, roundingMode);
+  opStatus divide(const IEEEFloat &, roundingMode);
+  /// IEEE remainder.
+  opStatus remainder(const IEEEFloat &);
+  /// C fmod, or llvm frem.
+  opStatus mod(const IEEEFloat &);
+  opStatus fusedMultiplyAdd(const IEEEFloat &, const IEEEFloat &, roundingMode);
+  opStatus roundToIntegral(roundingMode);
+  /// IEEE-754R 5.3.1: nextUp/nextDown.
+  opStatus next(bool nextDown);
+
+  /// @}
+
+  /// \name Sign operations.
+  /// @{
+
+  void changeSign();
+
+  /// @}
+
+  /// \name Conversions
+  /// @{
+
+  opStatus convert(const fltSemantics &, roundingMode, bool *);
+  opStatus convertToInteger(MutableArrayRef<integerPart>, unsigned int, bool,
+                            roundingMode, bool *) const;
+  opStatus convertFromAPInt(const APInt &, bool, roundingMode);
+  opStatus convertFromSignExtendedInteger(const integerPart *, unsigned int,
+                                          bool, roundingMode);
+  opStatus convertFromZeroExtendedInteger(const integerPart *, unsigned int,
+                                          bool, roundingMode);
+  opStatus convertFromString(StringRef, roundingMode);
+  APInt bitcastToAPInt() const;
+  double convertToDouble() const;
+  float convertToFloat() const;
+
+  /// @}
+
+  /// The definition of equality is not straightforward for floating point, so
+  /// we won't use operator==.  Use one of the following, or write whatever it
+  /// is you really mean.
+  bool operator==(const IEEEFloat &) const = delete;
+
+  /// IEEE comparison with another floating point number (NaNs compare
+  /// unordered, 0==-0).
+  cmpResult compare(const IEEEFloat &) const;
+
+  /// Bitwise comparison for equality (QNaNs compare equal, 0!=-0).
+  bool bitwiseIsEqual(const IEEEFloat &) const;
+
+  /// Write out a hexadecimal representation of the floating point value to DST,
+  /// which must be of sufficient size, in the C99 form [-]0xh.hhhhp[+-]d.
+  /// Return the number of characters written, excluding the terminating NUL.
+  unsigned int convertToHexString(char *dst, unsigned int hexDigits,
+                                  bool upperCase, roundingMode) const;
+
+  /// \name IEEE-754R 5.7.2 General operations.
+  /// @{
+
+  /// IEEE-754R isSignMinus: Returns true if and only if the current value is
+  /// negative.
+  ///
+  /// This applies to zeros and NaNs as well.
+  bool isNegative() const { return sign; }
+
+  /// IEEE-754R isNormal: Returns true if and only if the current value is normal.
+  ///
+  /// This implies that the current value of the float is not zero, subnormal,
+  /// infinite, or NaN following the definition of normality from IEEE-754R.
+  bool isNormal() const { return !isDenormal() && isFiniteNonZero(); }
+
+  /// Returns true if and only if the current value is zero, subnormal, or
+  /// normal.
+  ///
+  /// This means that the value is not infinite or NaN.
+  bool isFinite() const { return !isNaN() && !isInfinity(); }
+
+  /// Returns true if and only if the float is plus or minus zero.
+  bool isZero() const { return category == fcZero; }
+
+  /// IEEE-754R isSubnormal(): Returns true if and only if the float is a
+  /// denormal.
+  bool isDenormal() const;
+
+  /// IEEE-754R isInfinite(): Returns true if and only if the float is infinity.
+  bool isInfinity() const { return category == fcInfinity; }
+
+  /// Returns true if and only if the float is a quiet or signaling NaN.
+  bool isNaN() const { return category == fcNaN; }
+
+  /// Returns true if and only if the float is a signaling NaN.
+  bool isSignaling() const;
+
+  /// @}
+
+  /// \name Simple Queries
+  /// @{
+
+  fltCategory getCategory() const { return category; }
+  const fltSemantics &getSemantics() const { return *semantics; }
+  bool isNonZero() const { return category != fcZero; }
+  bool isFiniteNonZero() const { return isFinite() && !isZero(); }
+  bool isPosZero() const { return isZero() && !isNegative(); }
+  bool isNegZero() const { return isZero() && isNegative(); }
+
+  /// Returns true if and only if the number has the smallest possible non-zero
+  /// magnitude in the current semantics.
+  bool isSmallest() const;
+
+  /// Returns true if and only if the number has the largest possible finite
+  /// magnitude in the current semantics.
+  bool isLargest() const;
+
+  /// Returns true if and only if the number is an exact integer.
+  bool isInteger() const;
+
+  /// @}
+
+  IEEEFloat &operator=(const IEEEFloat &);
+  IEEEFloat &operator=(IEEEFloat &&);
+
+  /// Overload to compute a hash code for an APFloat value.
+  ///
+  /// Note that the use of hash codes for floating point values is in general
+  /// fraught with peril. Equality is hard to define for these values. For
+  /// example, should negative and positive zero hash to different codes? Are
+  /// they equal or not? This hash value implementation specifically
+  /// emphasizes producing different codes for different inputs in order to
+  /// be used in canonicalization and memoization. As such, equality is
+  /// bitwiseIsEqual, and 0 != -0.
+  friend hash_code hash_value(const IEEEFloat &Arg);
+
+  /// Converts this value into a decimal string.
+  ///
+  /// \param FormatPrecision The maximum number of digits of
+  ///   precision to output.  If there are fewer digits available,
+  ///   zero padding will not be used unless the value is
+  ///   integral and small enough to be expressed in
+  ///   FormatPrecision digits.  0 means to use the natural
+  ///   precision of the number.
+  /// \param FormatMaxPadding The maximum number of zeros to
+  ///   consider inserting before falling back to scientific
+  ///   notation.  0 means to always use scientific notation.
+  ///
+  /// \param TruncateZero Indicates whether to remove trailing zeros in the
+  ///   fraction part. Setting this parameter to false forces the output to
+  ///   more closely match default printf behavior: a lowercase 'e' is used
+  ///   as the exponent delimiter and the exponent always contains at least
+  ///   two digits.
+  ///
+  /// Number       Precision    MaxPadding      Result
+  /// ------       ---------    ----------      ------
+  /// 1.01E+4              5             2       10100
+  /// 1.01E+4              4             2       1.01E+4
+  /// 1.01E+4              5             1       1.01E+4
+  /// 1.01E-2              5             2       0.0101
+  /// 1.01E-2              4             2       0.0101
+  /// 1.01E-2              4             1       1.01E-2
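+  ///
+  /// An illustrative call (a sketch; Val is a hypothetical value):
+  /// \code
+  ///   SmallString<16> Buf;
+  ///   Val.toString(Buf);   // natural precision, default MaxPadding of 3
+  /// \endcode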
+  void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0,
+                unsigned FormatMaxPadding = 3, bool TruncateZero = true) const;
+
+  /// If this value has an exact multiplicative inverse, store it in inv and
+  /// return true.
+  bool getExactInverse(APFloat *inv) const;
+
+  /// Returns the exponent of the internal representation of the APFloat.
+  ///
+  /// Because the radix of APFloat is 2, this is equivalent to floor(log2(x)).
+  /// For special APFloat values, this returns special error codes:
+  ///
+  ///   NaN -> \c IEK_NaN
+  ///   0   -> \c IEK_Zero
+  ///   Inf -> \c IEK_Inf
+  ///
+  friend int ilogb(const IEEEFloat &Arg);
+
+  /// Returns: X * 2^Exp for integral exponents.
+  friend IEEEFloat scalbn(IEEEFloat X, int Exp, roundingMode);
+
+  friend IEEEFloat frexp(const IEEEFloat &X, int &Exp, roundingMode);
+
+  /// \name Special value setters.
+  /// @{
+
+  void makeLargest(bool Neg = false);
+  void makeSmallest(bool Neg = false);
+  void makeNaN(bool SNaN = false, bool Neg = false,
+               const APInt *fill = nullptr);
+  void makeInf(bool Neg = false);
+  void makeZero(bool Neg = false);
+  void makeQuiet();
+
+  /// Returns the smallest (by magnitude) normalized finite number in the given
+  /// semantics.
+  ///
+  /// \param Negative - True iff the number should be negative
+  void makeSmallestNormalized(bool Negative = false);
+
+  /// @}
+
+  cmpResult compareAbsoluteValue(const IEEEFloat &) const;
+
+private:
+  /// \name Simple Queries
+  /// @{
+
+  integerPart *significandParts();
+  const integerPart *significandParts() const;
+  unsigned int partCount() const;
+
+  /// @}
+
+  /// \name Significand operations.
+  /// @{
+
+  integerPart addSignificand(const IEEEFloat &);
+  integerPart subtractSignificand(const IEEEFloat &, integerPart);
+  lostFraction addOrSubtractSignificand(const IEEEFloat &, bool subtract);
+  lostFraction multiplySignificand(const IEEEFloat &, const IEEEFloat *);
+  lostFraction divideSignificand(const IEEEFloat &);
+  void incrementSignificand();
+  void initialize(const fltSemantics *);
+  void shiftSignificandLeft(unsigned int);
+  lostFraction shiftSignificandRight(unsigned int);
+  unsigned int significandLSB() const;
+  unsigned int significandMSB() const;
+  void zeroSignificand();
+  /// Return true if the significand excluding the integral bit is all ones.
+  bool isSignificandAllOnes() const;
+  /// Return true if the significand excluding the integral bit is all zeros.
+  bool isSignificandAllZeros() const;
+
+  /// @}
+
+  /// \name Arithmetic on special values.
+  /// @{
+
+  opStatus addOrSubtractSpecials(const IEEEFloat &, bool subtract);
+  opStatus divideSpecials(const IEEEFloat &);
+  opStatus multiplySpecials(const IEEEFloat &);
+  opStatus modSpecials(const IEEEFloat &);
+
+  /// @}
+
+  /// \name Miscellany
+  /// @{
+
+  bool convertFromStringSpecials(StringRef str);
+  opStatus normalize(roundingMode, lostFraction);
+  opStatus addOrSubtract(const IEEEFloat &, roundingMode, bool subtract);
+  opStatus handleOverflow(roundingMode);
+  bool roundAwayFromZero(roundingMode, lostFraction, unsigned int) const;
+  opStatus convertToSignExtendedInteger(MutableArrayRef<integerPart>,
+                                        unsigned int, bool, roundingMode,
+                                        bool *) const;
+  opStatus convertFromUnsignedParts(const integerPart *, unsigned int,
+                                    roundingMode);
+  opStatus convertFromHexadecimalString(StringRef, roundingMode);
+  opStatus convertFromDecimalString(StringRef, roundingMode);
+  char *convertNormalToHexString(char *, unsigned int, bool,
+                                 roundingMode) const;
+  opStatus roundSignificandWithExponent(const integerPart *, unsigned int, int,
+                                        roundingMode);
+
+  /// @}
+
+  APInt convertHalfAPFloatToAPInt() const;
+  APInt convertFloatAPFloatToAPInt() const;
+  APInt convertDoubleAPFloatToAPInt() const;
+  APInt convertQuadrupleAPFloatToAPInt() const;
+  APInt convertF80LongDoubleAPFloatToAPInt() const;
+  APInt convertPPCDoubleDoubleAPFloatToAPInt() const;
+  void initFromAPInt(const fltSemantics *Sem, const APInt &api);
+  void initFromHalfAPInt(const APInt &api);
+  void initFromFloatAPInt(const APInt &api);
+  void initFromDoubleAPInt(const APInt &api);
+  void initFromQuadrupleAPInt(const APInt &api);
+  void initFromF80LongDoubleAPInt(const APInt &api);
+  void initFromPPCDoubleDoubleAPInt(const APInt &api);
+
+  void assign(const IEEEFloat &);
+  void copySignificand(const IEEEFloat &);
+  void freeSignificand();
+
+  /// Note: this must be the first data member.
+  /// The semantics that this value obeys.
+  const fltSemantics *semantics;
+
+  /// A binary fraction with an explicit integer bit.
+  ///
+  /// The significand must be at least one bit wider than the target precision.
+  union Significand {
+    integerPart part;
+    integerPart *parts;
+  } significand;
+
+  /// The signed unbiased exponent of the value.
+  ExponentType exponent;
+
+  /// What kind of floating point number this is.
+  ///
+  /// Only 2 bits are required, but Visual Studio incorrectly sign extends it.
+  /// Using the extra bit keeps it from failing under Visual Studio.
+  fltCategory category : 3;
+
+  /// Sign bit of the number.
+  unsigned int sign : 1;
+};
+
+hash_code hash_value(const IEEEFloat &Arg);
+int ilogb(const IEEEFloat &Arg);
+IEEEFloat scalbn(IEEEFloat X, int Exp, IEEEFloat::roundingMode);
+IEEEFloat frexp(const IEEEFloat &Val, int &Exp, IEEEFloat::roundingMode RM);
+
+// This mode implements a more precise float in terms of two APFloats.
+// The interface and layout are designed for arbitrary underlying semantics,
+// though currently only PPCDoubleDouble semantics are supported, whose
+// corresponding underlying semantics are IEEEdouble.
+class DoubleAPFloat final : public APFloatBase {
+  // Note: this must be the first data member.
+  const fltSemantics *Semantics;
+  std::unique_ptr<APFloat[]> Floats;
+
+  opStatus addImpl(const APFloat &a, const APFloat &aa, const APFloat &c,
+                   const APFloat &cc, roundingMode RM);
+
+  opStatus addWithSpecial(const DoubleAPFloat &LHS, const DoubleAPFloat &RHS,
+                          DoubleAPFloat &Out, roundingMode RM);
+
+public:
+  DoubleAPFloat(const fltSemantics &S);
+  DoubleAPFloat(const fltSemantics &S, uninitializedTag);
+  DoubleAPFloat(const fltSemantics &S, integerPart);
+  DoubleAPFloat(const fltSemantics &S, const APInt &I);
+  DoubleAPFloat(const fltSemantics &S, APFloat &&First, APFloat &&Second);
+  DoubleAPFloat(const DoubleAPFloat &RHS);
+  DoubleAPFloat(DoubleAPFloat &&RHS);
+
+  DoubleAPFloat &operator=(const DoubleAPFloat &RHS);
+
+  DoubleAPFloat &operator=(DoubleAPFloat &&RHS) {
+    if (this != &RHS) {
+      this->~DoubleAPFloat();
+      new (this) DoubleAPFloat(std::move(RHS));
+    }
+    return *this;
+  }
+
+  bool needsCleanup() const { return Floats != nullptr; }
+
+  APFloat &getFirst() { return Floats[0]; }
+  const APFloat &getFirst() const { return Floats[0]; }
+  APFloat &getSecond() { return Floats[1]; }
+  const APFloat &getSecond() const { return Floats[1]; }
+
+  opStatus add(const DoubleAPFloat &RHS, roundingMode RM);
+  opStatus subtract(const DoubleAPFloat &RHS, roundingMode RM);
+  opStatus multiply(const DoubleAPFloat &RHS, roundingMode RM);
+  opStatus divide(const DoubleAPFloat &RHS, roundingMode RM);
+  opStatus remainder(const DoubleAPFloat &RHS);
+  opStatus mod(const DoubleAPFloat &RHS);
+  opStatus fusedMultiplyAdd(const DoubleAPFloat &Multiplicand,
+                            const DoubleAPFloat &Addend, roundingMode RM);
+  opStatus roundToIntegral(roundingMode RM);
+  void changeSign();
+  cmpResult compareAbsoluteValue(const DoubleAPFloat &RHS) const;
+
+  fltCategory getCategory() const;
+  bool isNegative() const;
+
+  void makeInf(bool Neg);
+  void makeZero(bool Neg);
+  void makeLargest(bool Neg);
+  void makeSmallest(bool Neg);
+  void makeSmallestNormalized(bool Neg);
+  void makeNaN(bool SNaN, bool Neg, const APInt *fill);
+
+  cmpResult compare(const DoubleAPFloat &RHS) const;
+  bool bitwiseIsEqual(const DoubleAPFloat &RHS) const;
+  APInt bitcastToAPInt() const;
+  opStatus convertFromString(StringRef, roundingMode);
+  opStatus next(bool nextDown);
+
+  opStatus convertToInteger(MutableArrayRef<integerPart> Input,
+                            unsigned int Width, bool IsSigned, roundingMode RM,
+                            bool *IsExact) const;
+  opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM);
+  opStatus convertFromSignExtendedInteger(const integerPart *Input,
+                                          unsigned int InputSize, bool IsSigned,
+                                          roundingMode RM);
+  opStatus convertFromZeroExtendedInteger(const integerPart *Input,
+                                          unsigned int InputSize, bool IsSigned,
+                                          roundingMode RM);
+  unsigned int convertToHexString(char *DST, unsigned int HexDigits,
+                                  bool UpperCase, roundingMode RM) const;
+
+  bool isDenormal() const;
+  bool isSmallest() const;
+  bool isLargest() const;
+  bool isInteger() const;
+
+  void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision,
+                unsigned FormatMaxPadding, bool TruncateZero = true) const;
+
+  bool getExactInverse(APFloat *inv) const;
+
+  friend int ilogb(const DoubleAPFloat &Arg);
+  friend DoubleAPFloat scalbn(DoubleAPFloat X, int Exp, roundingMode);
+  friend DoubleAPFloat frexp(const DoubleAPFloat &X, int &Exp, roundingMode);
+  friend hash_code hash_value(const DoubleAPFloat &Arg);
+};
+
+hash_code hash_value(const DoubleAPFloat &Arg);
+
+} // End detail namespace
+
+// This is an interface class that currently forwards functionality from
+// detail::IEEEFloat.
+class APFloat : public APFloatBase {
+  typedef detail::IEEEFloat IEEEFloat;
+  typedef detail::DoubleAPFloat DoubleAPFloat;
+
+  static_assert(std::is_standard_layout<IEEEFloat>::value, "");
+
+  union Storage {
+    const fltSemantics *semantics;
+    IEEEFloat IEEE;
+    DoubleAPFloat Double;
+
+    explicit Storage(IEEEFloat F, const fltSemantics &S);
+    explicit Storage(DoubleAPFloat F, const fltSemantics &S)
+        : Double(std::move(F)) {
+      assert(&S == &PPCDoubleDouble());
+    }
+
+    template <typename... ArgTypes>
+    Storage(const fltSemantics &Semantics, ArgTypes &&... Args) {
+      if (usesLayout<IEEEFloat>(Semantics)) {
+        new (&IEEE) IEEEFloat(Semantics, std::forward<ArgTypes>(Args)...);
+        return;
+      }
+      if (usesLayout<DoubleAPFloat>(Semantics)) {
+        new (&Double) DoubleAPFloat(Semantics, std::forward<ArgTypes>(Args)...);
+        return;
+      }
+      llvm_unreachable("Unexpected semantics");
+    }
+
+    ~Storage() {
+      if (usesLayout<IEEEFloat>(*semantics)) {
+        IEEE.~IEEEFloat();
+        return;
+      }
+      if (usesLayout<DoubleAPFloat>(*semantics)) {
+        Double.~DoubleAPFloat();
+        return;
+      }
+      llvm_unreachable("Unexpected semantics");
+    }
+
+    Storage(const Storage &RHS) {
+      if (usesLayout<IEEEFloat>(*RHS.semantics)) {
+        new (this) IEEEFloat(RHS.IEEE);
+        return;
+      }
+      if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+        new (this) DoubleAPFloat(RHS.Double);
+        return;
+      }
+      llvm_unreachable("Unexpected semantics");
+    }
+
+    Storage(Storage &&RHS) {
+      if (usesLayout<IEEEFloat>(*RHS.semantics)) {
+        new (this) IEEEFloat(std::move(RHS.IEEE));
+        return;
+      }
+      if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+        new (this) DoubleAPFloat(std::move(RHS.Double));
+        return;
+      }
+      llvm_unreachable("Unexpected semantics");
+    }
+
+    Storage &operator=(const Storage &RHS) {
+      if (usesLayout<IEEEFloat>(*semantics) &&
+          usesLayout<IEEEFloat>(*RHS.semantics)) {
+        IEEE = RHS.IEEE;
+      } else if (usesLayout<DoubleAPFloat>(*semantics) &&
+                 usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+        Double = RHS.Double;
+      } else if (this != &RHS) {
+        this->~Storage();
+        new (this) Storage(RHS);
+      }
+      return *this;
+    }
+
+    Storage &operator=(Storage &&RHS) {
+      if (usesLayout<IEEEFloat>(*semantics) &&
+          usesLayout<IEEEFloat>(*RHS.semantics)) {
+        IEEE = std::move(RHS.IEEE);
+      } else if (usesLayout<DoubleAPFloat>(*semantics) &&
+                 usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+        Double = std::move(RHS.Double);
+      } else if (this != &RHS) {
+        this->~Storage();
+        new (this) Storage(std::move(RHS));
+      }
+      return *this;
+    }
+  } U;
+
+  template <typename T> static bool usesLayout(const fltSemantics &Semantics) {
+    static_assert(std::is_same<T, IEEEFloat>::value ||
+                  std::is_same<T, DoubleAPFloat>::value, "");
+    if (std::is_same<T, DoubleAPFloat>::value) {
+      return &Semantics == &PPCDoubleDouble();
+    }
+    return &Semantics != &PPCDoubleDouble();
+  }
+
+  IEEEFloat &getIEEE() {
+    if (usesLayout<IEEEFloat>(*U.semantics))
+      return U.IEEE;
+    if (usesLayout<DoubleAPFloat>(*U.semantics))
+      return U.Double.getFirst().U.IEEE;
+    llvm_unreachable("Unexpected semantics");
+  }
+
+  const IEEEFloat &getIEEE() const {
+    if (usesLayout<IEEEFloat>(*U.semantics))
+      return U.IEEE;
+    if (usesLayout<DoubleAPFloat>(*U.semantics))
+      return U.Double.getFirst().U.IEEE;
+    llvm_unreachable("Unexpected semantics");
+  }
+
+  void makeZero(bool Neg) { APFLOAT_DISPATCH_ON_SEMANTICS(makeZero(Neg)); }
+
+  void makeInf(bool Neg) { APFLOAT_DISPATCH_ON_SEMANTICS(makeInf(Neg)); }
+
+  void makeNaN(bool SNaN, bool Neg, const APInt *fill) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(makeNaN(SNaN, Neg, fill));
+  }
+
+  void makeLargest(bool Neg) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(makeLargest(Neg));
+  }
+
+  void makeSmallest(bool Neg) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(makeSmallest(Neg));
+  }
+
+  void makeSmallestNormalized(bool Neg) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(makeSmallestNormalized(Neg));
+  }
+
+  // FIXME: This is needed because clang 3.3 (and older versions) always
+  // checks for the default constructor in an array aggregate initialization,
+  // even if no element in the array is default-initialized.
+  APFloat() : U(IEEEdouble()) {
+    llvm_unreachable("This is a workaround for old clang.");
+  }
+
+  explicit APFloat(IEEEFloat F, const fltSemantics &S) : U(std::move(F), S) {}
+  explicit APFloat(DoubleAPFloat F, const fltSemantics &S)
+      : U(std::move(F), S) {}
+
+  cmpResult compareAbsoluteValue(const APFloat &RHS) const {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only compare APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.compareAbsoluteValue(RHS.U.IEEE);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.compareAbsoluteValue(RHS.U.Double);
+    llvm_unreachable("Unexpected semantics");
+  }
+
+public:
+  APFloat(const fltSemantics &Semantics) : U(Semantics) {}
+  APFloat(const fltSemantics &Semantics, StringRef S);
+  APFloat(const fltSemantics &Semantics, integerPart I) : U(Semantics, I) {}
+  // TODO: Remove this constructor. This isn't faster than the first one.
+  APFloat(const fltSemantics &Semantics, uninitializedTag)
+      : U(Semantics, uninitialized) {}
+  APFloat(const fltSemantics &Semantics, const APInt &I) : U(Semantics, I) {}
+  explicit APFloat(double d) : U(IEEEFloat(d), IEEEdouble()) {}
+  explicit APFloat(float f) : U(IEEEFloat(f), IEEEsingle()) {}
+  APFloat(const APFloat &RHS) = default;
+  APFloat(APFloat &&RHS) = default;
+
+  ~APFloat() = default;
+
+  bool needsCleanup() const { APFLOAT_DISPATCH_ON_SEMANTICS(needsCleanup()); }
+
+  /// Factory for Positive and Negative Zero.
+  ///
+  /// \param Negative True iff the number should be negative.
+  static APFloat getZero(const fltSemantics &Sem, bool Negative = false) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeZero(Negative);
+    return Val;
+  }
+
+  /// Factory for Positive and Negative Infinity.
+  ///
+  /// \param Negative True iff the number should be negative.
+  static APFloat getInf(const fltSemantics &Sem, bool Negative = false) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeInf(Negative);
+    return Val;
+  }
+
+  /// Factory for NaN values.
+  ///
+  /// \param Negative - True iff the NaN generated should be negative.
+  /// \param type - The unspecified fill bits for creating the NaN, 0 by
+  /// default.  The value is truncated as necessary.
+  static APFloat getNaN(const fltSemantics &Sem, bool Negative = false,
+                        unsigned type = 0) {
+    if (type) {
+      APInt fill(64, type);
+      return getQNaN(Sem, Negative, &fill);
+    } else {
+      return getQNaN(Sem, Negative, nullptr);
+    }
+  }
+
+  /// Factory for QNaN values.
+  static APFloat getQNaN(const fltSemantics &Sem, bool Negative = false,
+                         const APInt *payload = nullptr) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeNaN(false, Negative, payload);
+    return Val;
+  }
+
+  /// Factory for SNaN values.
+  static APFloat getSNaN(const fltSemantics &Sem, bool Negative = false,
+                         const APInt *payload = nullptr) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeNaN(true, Negative, payload);
+    return Val;
+  }
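+
+  // Illustrative sketch of the NaN factories:
+  //   APFloat Q = APFloat::getQNaN(APFloat::IEEEsingle());
+  //   APFloat S = APFloat::getSNaN(APFloat::IEEEsingle());
+  //   // Q.isNaN() && !Q.isSignaling();  S.isNaN() && S.isSignaling()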
+
+  /// Returns the largest finite number in the given semantics.
+  ///
+  /// \param Negative - True iff the number should be negative
+  static APFloat getLargest(const fltSemantics &Sem, bool Negative = false) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeLargest(Negative);
+    return Val;
+  }
+
+  /// Returns the smallest (by magnitude) finite number in the given semantics.
+  /// Might be denormalized, which implies a relative loss of precision.
+  ///
+  /// \param Negative - True iff the number should be negative
+  static APFloat getSmallest(const fltSemantics &Sem, bool Negative = false) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeSmallest(Negative);
+    return Val;
+  }
+
+  /// Returns the smallest (by magnitude) normalized finite number in the given
+  /// semantics.
+  ///
+  /// \param Negative - True iff the number should be negative
+  static APFloat getSmallestNormalized(const fltSemantics &Sem,
+                                       bool Negative = false) {
+    APFloat Val(Sem, uninitialized);
+    Val.makeSmallestNormalized(Negative);
+    return Val;
+  }
+
+  /// Returns a float which is bitcasted from an all-ones integer.
+  ///
+  /// \param BitWidth - Select float type
+  /// \param isIEEE   - If 128 bit number, select between PPC and IEEE
+  static APFloat getAllOnesValue(unsigned BitWidth, bool isIEEE = false);
+
+  /// Used to insert APFloat objects, or objects that contain APFloat objects,
+  /// into FoldingSets.
+  void Profile(FoldingSetNodeID &NID) const;
+
+  opStatus add(const APFloat &RHS, roundingMode RM) {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only call on two APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.add(RHS.U.IEEE, RM);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.add(RHS.U.Double, RM);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus subtract(const APFloat &RHS, roundingMode RM) {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only call on two APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.subtract(RHS.U.IEEE, RM);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.subtract(RHS.U.Double, RM);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus multiply(const APFloat &RHS, roundingMode RM) {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only call on two APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.multiply(RHS.U.IEEE, RM);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.multiply(RHS.U.Double, RM);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus divide(const APFloat &RHS, roundingMode RM) {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only call on two APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.divide(RHS.U.IEEE, RM);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.divide(RHS.U.Double, RM);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus remainder(const APFloat &RHS) {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only call on two APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.remainder(RHS.U.IEEE);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.remainder(RHS.U.Double);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus mod(const APFloat &RHS) {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only call on two APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.mod(RHS.U.IEEE);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.mod(RHS.U.Double);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend,
+                            roundingMode RM) {
+    assert(&getSemantics() == &Multiplicand.getSemantics() &&
+           "Should only call on APFloats with the same semantics");
+    assert(&getSemantics() == &Addend.getSemantics() &&
+           "Should only call on APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.fusedMultiplyAdd(Multiplicand.U.IEEE, Addend.U.IEEE, RM);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.fusedMultiplyAdd(Multiplicand.U.Double, Addend.U.Double,
+                                       RM);
+    llvm_unreachable("Unexpected semantics");
+  }
+  opStatus roundToIntegral(roundingMode RM) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(roundToIntegral(RM));
+  }
+
+  // TODO: bool parameters are not readable and a source of bugs.
+  // Do something.
+  opStatus next(bool nextDown) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(next(nextDown));
+  }
+
+  /// Add two APFloats, rounding ties to the nearest even.
+  /// No error checking.
+  APFloat operator+(const APFloat &RHS) const {
+    APFloat Result(*this);
+    (void)Result.add(RHS, rmNearestTiesToEven);
+    return Result;
+  }
+
+  /// Subtract two APFloats, rounding ties to the nearest even.
+  /// No error checking.
+  APFloat operator-(const APFloat &RHS) const {
+    APFloat Result(*this);
+    (void)Result.subtract(RHS, rmNearestTiesToEven);
+    return Result;
+  }
+
+  /// Multiply two APFloats, rounding ties to the nearest even.
+  /// No error checking.
+  APFloat operator*(const APFloat &RHS) const {
+    APFloat Result(*this);
+    (void)Result.multiply(RHS, rmNearestTiesToEven);
+    return Result;
+  }
+
+  /// Divide the first APFloat by the second, rounding ties to the nearest even.
+  /// No error checking.
+  APFloat operator/(const APFloat &RHS) const {
+    APFloat Result(*this);
+    (void)Result.divide(RHS, rmNearestTiesToEven);
+    return Result;
+  }
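+
+  // The four operators above fix the rounding mode to rmNearestTiesToEven and
+  // discard the returned status; an illustrative sketch (A, B, C hypothetical
+  // APFloats of identical semantics):
+  //   APFloat R = (A * B) + C;   // statuses from multiply/add are ignored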
+
+  void changeSign() { APFLOAT_DISPATCH_ON_SEMANTICS(changeSign()); }
+  void clearSign() {
+    if (isNegative())
+      changeSign();
+  }
+  void copySign(const APFloat &RHS) {
+    if (isNegative() != RHS.isNegative())
+      changeSign();
+  }
+
+  /// A static helper to produce a copy of an APFloat value with its sign
+  /// copied from some other APFloat.
+  static APFloat copySign(APFloat Value, const APFloat &Sign) {
+    Value.copySign(Sign);
+    return Value;
+  }
+
+  opStatus convert(const fltSemantics &ToSemantics, roundingMode RM,
+                   bool *losesInfo);
+  opStatus convertToInteger(MutableArrayRef<integerPart> Input,
+                            unsigned int Width, bool IsSigned, roundingMode RM,
+                            bool *IsExact) const {
+    APFLOAT_DISPATCH_ON_SEMANTICS(
+        convertToInteger(Input, Width, IsSigned, RM, IsExact));
+  }
+  opStatus convertToInteger(APSInt &Result, roundingMode RM,
+                            bool *IsExact) const;
+  opStatus convertFromAPInt(const APInt &Input, bool IsSigned,
+                            roundingMode RM) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(convertFromAPInt(Input, IsSigned, RM));
+  }
+  opStatus convertFromSignExtendedInteger(const integerPart *Input,
+                                          unsigned int InputSize, bool IsSigned,
+                                          roundingMode RM) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(
+        convertFromSignExtendedInteger(Input, InputSize, IsSigned, RM));
+  }
+  opStatus convertFromZeroExtendedInteger(const integerPart *Input,
+                                          unsigned int InputSize, bool IsSigned,
+                                          roundingMode RM) {
+    APFLOAT_DISPATCH_ON_SEMANTICS(
+        convertFromZeroExtendedInteger(Input, InputSize, IsSigned, RM));
+  }
+  opStatus convertFromString(StringRef, roundingMode);
+  APInt bitcastToAPInt() const {
+    APFLOAT_DISPATCH_ON_SEMANTICS(bitcastToAPInt());
+  }
+  double convertToDouble() const { return getIEEE().convertToDouble(); }
+  float convertToFloat() const { return getIEEE().convertToFloat(); }
+
+  bool operator==(const APFloat &) const = delete;
+
+  cmpResult compare(const APFloat &RHS) const {
+    assert(&getSemantics() == &RHS.getSemantics() &&
+           "Should only compare APFloats with the same semantics");
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.compare(RHS.U.IEEE);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.compare(RHS.U.Double);
+    llvm_unreachable("Unexpected semantics");
+  }
+
+  bool bitwiseIsEqual(const APFloat &RHS) const {
+    if (&getSemantics() != &RHS.getSemantics())
+      return false;
+    if (usesLayout<IEEEFloat>(getSemantics()))
+      return U.IEEE.bitwiseIsEqual(RHS.U.IEEE);
+    if (usesLayout<DoubleAPFloat>(getSemantics()))
+      return U.Double.bitwiseIsEqual(RHS.U.Double);
+    llvm_unreachable("Unexpected semantics");
+  }
+
+  /// We don't rely on operator== working on double values, as
+  /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
+  /// As such, this method can be used to do an exact bit-for-bit comparison of
+  /// two floating point values.
+  ///
+  /// We leave the version with the double argument here because it's just so
+  /// convenient to write "2.0" and the like.  Without this function we'd
+  /// have to duplicate its logic everywhere it's called.
+  bool isExactlyValue(double V) const {
+    bool ignored;
+    APFloat Tmp(V);
+    Tmp.convert(getSemantics(), APFloat::rmNearestTiesToEven, &ignored);
+    return bitwiseIsEqual(Tmp);
+  }
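+
+  // Illustrative sketch: for an IEEEdouble APF holding 2.0,
+  //   APF.isExactlyValue(2.0);   // true
+  //   APF.isExactlyValue(2.1);   // false: 2.1 is not exactly representable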
+
+  unsigned int convertToHexString(char *DST, unsigned int HexDigits,
+                                  bool UpperCase, roundingMode RM) const {
+    APFLOAT_DISPATCH_ON_SEMANTICS(
+        convertToHexString(DST, HexDigits, UpperCase, RM));
+  }
+
+  bool isZero() const { return getCategory() == fcZero; }
+  bool isInfinity() const { return getCategory() == fcInfinity; }
+  bool isNaN() const { return getCategory() == fcNaN; }
+
+  bool isNegative() const { return getIEEE().isNegative(); }
+  bool isDenormal() const { APFLOAT_DISPATCH_ON_SEMANTICS(isDenormal()); }
+  bool isSignaling() const { return getIEEE().isSignaling(); }
+
+  bool isNormal() const { return !isDenormal() && isFiniteNonZero(); }
+  bool isFinite() const { return !isNaN() && !isInfinity(); }
+
+  fltCategory getCategory() const { return getIEEE().getCategory(); }
+  const fltSemantics &getSemantics() const { return *U.semantics; }
+  bool isNonZero() const { return !isZero(); }
+  bool isFiniteNonZero() const { return isFinite() && !isZero(); }
+  bool isPosZero() const { return isZero() && !isNegative(); }
+  bool isNegZero() const { return isZero() && isNegative(); }
+  bool isSmallest() const { APFLOAT_DISPATCH_ON_SEMANTICS(isSmallest()); }
+  bool isLargest() const { APFLOAT_DISPATCH_ON_SEMANTICS(isLargest()); }
+  bool isInteger() const { APFLOAT_DISPATCH_ON_SEMANTICS(isInteger()); }
+
+  APFloat &operator=(const APFloat &RHS) = default;
+  APFloat &operator=(APFloat &&RHS) = default;
+
+  void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0,
+                unsigned FormatMaxPadding = 3, bool TruncateZero = true) const {
+    APFLOAT_DISPATCH_ON_SEMANTICS(
+        toString(Str, FormatPrecision, FormatMaxPadding, TruncateZero));
+  }
+
+  void print(raw_ostream &) const;
+  void dump() const;
+
+  bool getExactInverse(APFloat *inv) const {
+    APFLOAT_DISPATCH_ON_SEMANTICS(getExactInverse(inv));
+  }
+
+  friend hash_code hash_value(const APFloat &Arg);
+  friend int ilogb(const APFloat &Arg) { return ilogb(Arg.getIEEE()); }
+  friend APFloat scalbn(APFloat X, int Exp, roundingMode RM);
+  friend APFloat frexp(const APFloat &X, int &Exp, roundingMode RM);
+  friend IEEEFloat;
+  friend DoubleAPFloat;
+};
+
+/// See friend declarations above.
+///
+/// These additional declarations are required in order to compile LLVM with
+/// the IBM xlC compiler.
+hash_code hash_value(const APFloat &Arg);
+inline APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM) {
+  if (APFloat::usesLayout<detail::IEEEFloat>(X.getSemantics()))
+    return APFloat(scalbn(X.U.IEEE, Exp, RM), X.getSemantics());
+  if (APFloat::usesLayout<detail::DoubleAPFloat>(X.getSemantics()))
+    return APFloat(scalbn(X.U.Double, Exp, RM), X.getSemantics());
+  llvm_unreachable("Unexpected semantics");
+}
+
+/// Equivalent of C standard library function.
+///
+/// While the C standard says Exp is an unspecified value for infinity and nan,
+/// this returns INT_MAX for infinities, and INT_MIN for NaNs.
+inline APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM) {
+  if (APFloat::usesLayout<detail::IEEEFloat>(X.getSemantics()))
+    return APFloat(frexp(X.U.IEEE, Exp, RM), X.getSemantics());
+  if (APFloat::usesLayout<detail::DoubleAPFloat>(X.getSemantics()))
+    return APFloat(frexp(X.U.Double, Exp, RM), X.getSemantics());
+  llvm_unreachable("Unexpected semantics");
+}
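+
+// Illustrative sketch of the frexp/scalbn round trip for finite non-zero X:
+//   int Exp;
+//   APFloat M = frexp(X, Exp, APFloat::rmNearestTiesToEven);
+//   // now 0.5 <= |M| < 1.0, and scalbn(M, Exp, RM) reconstructs X
+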
+/// Returns the absolute value of the argument.
+inline APFloat abs(APFloat X) {
+  X.clearSign();
+  return X;
+}
+
+/// \brief Returns the negated value of the argument.
+inline APFloat neg(APFloat X) {
+  X.changeSign();
+  return X;
+}
+
+/// Implements IEEE minNum semantics. Returns the smaller of the 2 arguments if
+/// both are not NaN. If either argument is a NaN, returns the other argument.
+LLVM_READONLY
+inline APFloat minnum(const APFloat &A, const APFloat &B) {
+  if (A.isNaN())
+    return B;
+  if (B.isNaN())
+    return A;
+  return (B.compare(A) == APFloat::cmpLessThan) ? B : A;
+}
+
+/// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if
+/// both are not NaN. If either argument is a NaN, returns the other argument.
+LLVM_READONLY
+inline APFloat maxnum(const APFloat &A, const APFloat &B) {
+  if (A.isNaN())
+    return B;
+  if (B.isNaN())
+    return A;
+  return (A.compare(B) == APFloat::cmpLessThan) ? B : A;
+}
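+
+// Illustrative sketch: with A == 1.0 and B == NaN, both minnum(A, B) and
+// maxnum(A, B) return A, since a NaN argument yields the other operand.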
+
+} // namespace llvm
+
+#undef APFLOAT_DISPATCH_ON_SEMANTICS
+#endif // LLVM_ADT_APFLOAT_H
diff --git a/linux-x64/clang/include/llvm/ADT/APInt.h b/linux-x64/clang/include/llvm/ADT/APInt.h
new file mode 100644
index 0000000..118c62e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/APInt.h
@@ -0,0 +1,2156 @@
+//===-- llvm/ADT/APInt.h - For Arbitrary Precision Integer -----*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a class to represent arbitrary precision
+/// integral constant values and operations on them.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APINT_H
+#define LLVM_ADT_APINT_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <climits>
+#include <cstring>
+#include <string>
+
+namespace llvm {
+class FoldingSetNodeID;
+class StringRef;
+class hash_code;
+class raw_ostream;
+
+template <typename T> class SmallVectorImpl;
+template <typename T> class ArrayRef;
+
+class APInt;
+
+inline APInt operator-(APInt);
+
+//===----------------------------------------------------------------------===//
+//                              APInt Class
+//===----------------------------------------------------------------------===//
+
+/// \brief Class for arbitrary precision integers.
+///
+/// APInt is a functional replacement for common-case unsigned integer types
+/// like "unsigned", "unsigned long" or "uint64_t", but it also allows
+/// non-byte-width integer sizes and large integers of 3-bit, 15-bit, or more
+/// than 64-bit precision. APInt provides a variety of arithmetic operators
+/// and methods to manipulate integer values of any bit-width. It supports both
+/// the typical integer arithmetic and comparison operations as well as bitwise
+/// manipulation.
+///
+/// The class has several invariants worth noting:
+///   * All bit, byte, and word positions are zero-based.
+///   * Once the bit width is set, it doesn't change except by the Truncate,
+///     SignExtend, or ZeroExtend operations.
+///   * All binary operators must be on APInt instances of the same bit width.
+///     Attempting to use these operators on instances with different bit
+///     widths will yield an assertion.
+///   * The value is stored canonically as an unsigned value. For operations
+///     where it makes a difference, there are both signed and unsigned variants
+///     of the operation. For example, sdiv and udiv. However, because the bit
+///     widths must be the same, operations such as Mul and Add produce the same
+///     results regardless of whether the values are interpreted as signed or
+///     not.
+///   * In general, the class tries to follow the style of computation that LLVM
+///     uses in its IR. This simplifies its use for LLVM.
+///
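+/// An illustrative sketch of the fixed-width, wrapping behavior:
+/// \code
+///   APInt A(8, 200), B(8, 100);
+///   APInt C = A + B;                 // wraps: 300 mod 2^8 == 44
+///   bool IsFortyFour = (C.getZExtValue() == 44);
+/// \endcode
+///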
+class LLVM_NODISCARD APInt {
+public:
+  typedef uint64_t WordType;
+
+  /// This enum is used to hold the constants we need for APInt.
+  enum : unsigned {
+    /// Byte size of a word.
+    APINT_WORD_SIZE = sizeof(WordType),
+    /// Bits in a word.
+    APINT_BITS_PER_WORD = APINT_WORD_SIZE * CHAR_BIT
+  };
+
+  static const WordType WORD_MAX = ~WordType(0);
+
+private:
+  /// This union is used to store the integer value. When the
+  /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal.
+  union {
+    uint64_t VAL;   ///< Used to store the <= 64 bits integer value.
+    uint64_t *pVal; ///< Used to store the >64 bits integer value.
+  } U;
+
+  unsigned BitWidth; ///< The number of bits in this APInt.
+
+  friend struct DenseMapAPIntKeyInfo;
+
+  friend class APSInt;
+
+  /// \brief Fast internal constructor
+  ///
+  /// This constructor is used only internally for speed of construction of
+  /// temporaries. It is unsafe for general use so it is not public.
+  APInt(uint64_t *val, unsigned bits) : BitWidth(bits) {
+    U.pVal = val;
+  }
+
+  /// \brief Determine if this APInt just has one word to store value.
+  ///
+  /// \returns true if the number of bits <= 64, false otherwise.
+  bool isSingleWord() const { return BitWidth <= APINT_BITS_PER_WORD; }
+
+  /// \brief Determine which word a bit is in.
+  ///
+  /// \returns the word position for the specified bit position.
+  static unsigned whichWord(unsigned bitPosition) {
+    return bitPosition / APINT_BITS_PER_WORD;
+  }
+
+  /// \brief Determine which bit in a word a bit is in.
+  ///
+  /// \returns the bit position in a word for the specified bit position
+  /// in the APInt.
+  static unsigned whichBit(unsigned bitPosition) {
+    return bitPosition % APINT_BITS_PER_WORD;
+  }
+
+  /// \brief Get a single bit mask.
+  ///
+  /// \returns a uint64_t with only the bit at "whichBit(bitPosition)" set.
+  /// This method generates and returns a uint64_t (word) mask for a single
+  /// bit at a specific bit position. This is used to mask the bit in the
+  /// corresponding word.
+  static uint64_t maskBit(unsigned bitPosition) {
+    return 1ULL << whichBit(bitPosition);
+  }
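+
+  // Worked example (a sketch): for bitPosition == 67 with 64-bit words,
+  // whichWord(67) == 1 and whichBit(67) == 3, so maskBit(67) == 0x8, masking
+  // bit 3 of word 1.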
+
+  /// \brief Clear unused high order bits
+  ///
+  /// This method is used internally to clear the top "N" bits in the high order
+  /// word that are not used by the APInt. This is needed after the most
+  /// significant word is assigned a value to ensure that those bits are
+  /// zeroed out.
+  APInt &clearUnusedBits() {
+    // Compute how many bits are used in the final word
+    unsigned WordBits = ((BitWidth-1) % APINT_BITS_PER_WORD) + 1;
+
+    // Mask out the high bits.
+    uint64_t mask = WORD_MAX >> (APINT_BITS_PER_WORD - WordBits);
+    if (isSingleWord())
+      U.VAL &= mask;
+    else
+      U.pVal[getNumWords() - 1] &= mask;
+    return *this;
+  }
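+
+  // Worked example (a sketch): for BitWidth == 12, WordBits == 12 and
+  // mask == 0xFFF, so everything above the low 12 bits is cleared.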
+
+  /// \brief Get the word corresponding to a bit position
+  /// \returns the corresponding word for the specified bit position.
+  uint64_t getWord(unsigned bitPosition) const {
+    return isSingleWord() ? U.VAL : U.pVal[whichWord(bitPosition)];
+  }
+
+  /// Utility method to change the bit width of this APInt to the new bit
+  /// width, allocating and/or deallocating as necessary. There is no guarantee
+  /// on the value of any bits upon return; the caller should populate them.
+  void reallocate(unsigned NewBitWidth);
+
+  /// \brief Convert a char array into an APInt
+  ///
+  /// \param radix 2, 8, 10, 16, or 36
+  /// Converts a string into a number.  The string must be non-empty
+  /// and well-formed as a number of the given base. The bit-width
+  /// must be sufficient to hold the result.
+  ///
+  /// This is used by the constructors that take string arguments.
+  ///
+  /// StringRef::getAsInteger is superficially similar but (1) does
+  /// not assume that the string is well-formed and (2) grows the
+  /// result to hold the input.
+  void fromString(unsigned numBits, StringRef str, uint8_t radix);
+
+  /// \brief An internal division function for dividing APInts.
+  ///
+  /// This is used by the toString method to divide by the radix. It simply
+  /// provides a more convenient form of divide for internal use since KnuthDiv
+  /// has specific constraints on its inputs. If those constraints are not met
+  /// then it provides a simpler form of divide.
+  static void divide(const WordType *LHS, unsigned lhsWords,
+                     const WordType *RHS, unsigned rhsWords, WordType *Quotient,
+                     WordType *Remainder);
+
+  /// out-of-line slow case for inline constructor
+  void initSlowCase(uint64_t val, bool isSigned);
+
+  /// shared code between two array constructors
+  void initFromArray(ArrayRef<uint64_t> array);
+
+  /// out-of-line slow case for inline copy constructor
+  void initSlowCase(const APInt &that);
+
+  /// out-of-line slow case for shl
+  void shlSlowCase(unsigned ShiftAmt);
+
+  /// out-of-line slow case for lshr.
+  void lshrSlowCase(unsigned ShiftAmt);
+
+  /// out-of-line slow case for ashr.
+  void ashrSlowCase(unsigned ShiftAmt);
+
+  /// out-of-line slow case for operator=
+  void AssignSlowCase(const APInt &RHS);
+
+  /// out-of-line slow case for operator==
+  bool EqualSlowCase(const APInt &RHS) const LLVM_READONLY;
+
+  /// out-of-line slow case for countLeadingZeros
+  unsigned countLeadingZerosSlowCase() const LLVM_READONLY;
+
+  /// out-of-line slow case for countLeadingOnes.
+  unsigned countLeadingOnesSlowCase() const LLVM_READONLY;
+
+  /// out-of-line slow case for countTrailingZeros.
+  unsigned countTrailingZerosSlowCase() const LLVM_READONLY;
+
+  /// out-of-line slow case for countTrailingOnes
+  unsigned countTrailingOnesSlowCase() const LLVM_READONLY;
+
+  /// out-of-line slow case for countPopulation
+  unsigned countPopulationSlowCase() const LLVM_READONLY;
+
+  /// out-of-line slow case for intersects.
+  bool intersectsSlowCase(const APInt &RHS) const LLVM_READONLY;
+
+  /// out-of-line slow case for isSubsetOf.
+  bool isSubsetOfSlowCase(const APInt &RHS) const LLVM_READONLY;
+
+  /// out-of-line slow case for setBits.
+  void setBitsSlowCase(unsigned loBit, unsigned hiBit);
+
+  /// out-of-line slow case for flipAllBits.
+  void flipAllBitsSlowCase();
+
+  /// out-of-line slow case for operator&=.
+  void AndAssignSlowCase(const APInt& RHS);
+
+  /// out-of-line slow case for operator|=.
+  void OrAssignSlowCase(const APInt& RHS);
+
+  /// out-of-line slow case for operator^=.
+  void XorAssignSlowCase(const APInt& RHS);
+
+  /// Unsigned comparison. Returns -1, 0, or 1 if this APInt is less than, equal
+  /// to, or greater than RHS.
+  int compare(const APInt &RHS) const LLVM_READONLY;
+
+  /// Signed comparison. Returns -1, 0, or 1 if this APInt is less than, equal
+  /// to, or greater than RHS.
+  int compareSigned(const APInt &RHS) const LLVM_READONLY;
+
+public:
+  /// \name Constructors
+  /// @{
+
+  /// \brief Create a new APInt of numBits width, initialized as val.
+  ///
+  /// If isSigned is true then val is treated as if it were a signed value
+  /// (i.e. as an int64_t) and the appropriate sign extension to the bit width
+  /// will be done. Otherwise, no sign extension occurs (high order bits beyond
+  /// the range of val are zero filled).
+  ///
+  /// \param numBits the bit width of the constructed APInt
+  /// \param val the initial value of the APInt
+  /// \param isSigned how to treat signedness of val
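+  ///
+  /// For illustration (values follow from the rules above):
+  /// \code
+  ///   APInt A(16, 255);       // 0x00FF: high bits zero-filled
+  ///   APInt B(16, -1, true);  // 0xFFFF: val sign-extended to the bit width
+  /// \endcode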
+  APInt(unsigned numBits, uint64_t val, bool isSigned = false)
+      : BitWidth(numBits) {
+    assert(BitWidth && "bitwidth too small");
+    if (isSingleWord()) {
+      U.VAL = val;
+      clearUnusedBits();
+    } else {
+      initSlowCase(val, isSigned);
+    }
+  }
+
+  /// \brief Construct an APInt of numBits width, initialized as bigVal[].
+  ///
+  /// Note that bigVal.size() can be smaller or larger than the corresponding
+  /// bit width but any extraneous bits will be dropped.
+  ///
+  /// \param numBits the bit width of the constructed APInt
+  /// \param bigVal a sequence of words to form the initial value of the APInt
+  APInt(unsigned numBits, ArrayRef<uint64_t> bigVal);
+
+  /// Equivalent to APInt(numBits, ArrayRef<uint64_t>(bigVal, numWords)), but
+  /// deprecated because this constructor is prone to ambiguity with the
+  /// APInt(unsigned, uint64_t, bool) constructor.
+  ///
+  /// If this overload is ever deleted, care should be taken to prevent calls
+  /// from being incorrectly captured by the APInt(unsigned, uint64_t, bool)
+  /// constructor.
+  APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]);
+
+  /// \brief Construct an APInt from a string representation.
+  ///
+  /// This constructor interprets the string \p str in the given radix. The
+  /// interpretation stops when the first character that is not suitable for the
+  /// radix is encountered, or the end of the string. Acceptable radix values
+  /// are 2, 8, 10, 16, and 36. It is an error for the value implied by the
+  /// string to require more bits than numBits.
+  ///
+  /// \param numBits the bit width of the constructed APInt
+  /// \param str the string to be interpreted
+  /// \param radix the radix to use for the conversion
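+  ///
+  /// For instance:
+  /// \code
+  ///   APInt V(32, "deadbeef", 16);  // 0xDEADBEEF
+  ///   APInt W(12, "-1", 10);        // negative values wrap: 0xFFF
+  /// \endcode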
+  APInt(unsigned numBits, StringRef str, uint8_t radix);
+
+  /// Simply makes *this a copy of that.
+  /// @brief Copy Constructor.
+  APInt(const APInt &that) : BitWidth(that.BitWidth) {
+    if (isSingleWord())
+      U.VAL = that.U.VAL;
+    else
+      initSlowCase(that);
+  }
+
+  /// \brief Move Constructor.
+  APInt(APInt &&that) : BitWidth(that.BitWidth) {
+    memcpy(&U, &that.U, sizeof(U));
+    that.BitWidth = 0;
+  }
+
+  /// \brief Destructor.
+  ~APInt() {
+    if (needsCleanup())
+      delete[] U.pVal;
+  }
+
+  /// \brief Default constructor that creates an uninteresting APInt
+  /// representing a 1-bit zero value.
+  ///
+  /// This is useful for object deserialization (pair this with the static
+  /// method Read).
+  explicit APInt() : BitWidth(1) { U.VAL = 0; }
+
+  /// \brief Returns whether this instance allocated memory.
+  bool needsCleanup() const { return !isSingleWord(); }
+
+  /// Used to insert APInt objects, or objects that contain APInt objects, into
+  ///  FoldingSets.
+  void Profile(FoldingSetNodeID &id) const;
+
+  /// @}
+  /// \name Value Tests
+  /// @{
+
+  /// \brief Determine sign of this APInt.
+  ///
+  /// This tests the high bit of this APInt to determine if it is set.
+  ///
+  /// \returns true if this APInt is negative, false otherwise
+  bool isNegative() const { return (*this)[BitWidth - 1]; }
+
+  /// \brief Determine if this APInt Value is non-negative (>= 0)
+  ///
+  /// This tests the high bit of the APInt to determine if it is unset.
+  bool isNonNegative() const { return !isNegative(); }
+
+  /// \brief Determine if sign bit of this APInt is set.
+  ///
+  /// This tests the high bit of this APInt to determine if it is set.
+  ///
+  /// \returns true if this APInt has its sign bit set, false otherwise.
+  bool isSignBitSet() const { return (*this)[BitWidth-1]; }
+
+  /// \brief Determine if sign bit of this APInt is clear.
+  ///
+  /// This tests the high bit of this APInt to determine if it is clear.
+  ///
+  /// \returns true if this APInt has its sign bit clear, false otherwise.
+  bool isSignBitClear() const { return !isSignBitSet(); }
+
+  /// \brief Determine if this APInt Value is positive.
+  ///
+  /// This tests if the value of this APInt is positive (> 0). Note
+  /// that 0 is not a positive value.
+  ///
+  /// \returns true if this APInt is positive.
+  bool isStrictlyPositive() const { return isNonNegative() && !isNullValue(); }
+
+  /// \brief Determine if all bits are set
+  ///
+  /// This checks to see if all bits of this APInt are set.
+  bool isAllOnesValue() const {
+    if (isSingleWord())
+      return U.VAL == WORD_MAX >> (APINT_BITS_PER_WORD - BitWidth);
+    return countTrailingOnesSlowCase() == BitWidth;
+  }
+
+  /// \brief Determine if all bits are clear
+  ///
+  /// This checks to see if all bits of this APInt are clear.
+  bool isNullValue() const { return !*this; }
+
+  /// \brief Determine if this is a value of 1.
+  ///
+  /// This checks to see if the value of this APInt is one.
+  bool isOneValue() const {
+    if (isSingleWord())
+      return U.VAL == 1;
+    return countLeadingZerosSlowCase() == BitWidth - 1;
+  }
+
+  /// \brief Determine if this is the largest unsigned value.
+  ///
+  /// This checks to see if the value of this APInt is the maximum unsigned
+  /// value for the APInt's bit width.
+  bool isMaxValue() const { return isAllOnesValue(); }
+
+  /// \brief Determine if this is the largest signed value.
+  ///
+  /// This checks to see if the value of this APInt is the maximum signed
+  /// value for the APInt's bit width.
+  bool isMaxSignedValue() const {
+    if (isSingleWord())
+      return U.VAL == ((WordType(1) << (BitWidth - 1)) - 1);
+    return !isNegative() && countTrailingOnesSlowCase() == BitWidth - 1;
+  }
+
+  /// \brief Determine if this is the smallest unsigned value.
+  ///
+  /// This checks to see if the value of this APInt is the minimum unsigned
+  /// value for the APInt's bit width.
+  bool isMinValue() const { return isNullValue(); }
+
+  /// \brief Determine if this is the smallest signed value.
+  ///
+  /// This checks to see if the value of this APInt is the minimum signed
+  /// value for the APInt's bit width.
+  bool isMinSignedValue() const {
+    if (isSingleWord())
+      return U.VAL == (WordType(1) << (BitWidth - 1));
+    return isNegative() && countTrailingZerosSlowCase() == BitWidth - 1;
+  }
+
+  /// \brief Check if this APInt has an N-bit unsigned integer value.
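+  ///
+  /// For instance (isSignedIntN, just below, is the signed counterpart):
+  /// \code
+  ///   APInt(32, 200).isIntN(8);        // true: 200 fits in 8 unsigned bits
+  ///   APInt(32, 200).isSignedIntN(8);  // false: signed 8-bit max is 127
+  /// \endcode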
+  bool isIntN(unsigned N) const {
+    assert(N && "N == 0 ???");
+    return getActiveBits() <= N;
+  }
+
+  /// \brief Check if this APInt has an N-bit signed integer value.
+  bool isSignedIntN(unsigned N) const {
+    assert(N && "N == 0 ???");
+    return getMinSignedBits() <= N;
+  }
+
+  /// \brief Check if this APInt's value is a power of two greater than zero.
+  ///
+  /// \returns true if the argument APInt value is a power of two > 0.
+  bool isPowerOf2() const {
+    if (isSingleWord())
+      return isPowerOf2_64(U.VAL);
+    return countPopulationSlowCase() == 1;
+  }
+
+  /// \brief Check if the APInt's value is returned by getSignMask.
+  ///
+  /// \returns true if this is the value returned by getSignMask.
+  bool isSignMask() const { return isMinSignedValue(); }
+
+  /// \brief Convert APInt to a boolean value.
+  ///
+  /// This converts the APInt to a boolean value as a test against zero.
+  bool getBoolValue() const { return !!*this; }
+
+  /// If this value is smaller than the specified limit, return it, otherwise
+  /// return the limit value.  This causes the value to saturate to the limit.
+  uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) const {
+    return ugt(Limit) ? Limit : getZExtValue();
+  }
+
+  /// \brief Check if the APInt consists of a repeated bit pattern.
+  ///
+  /// e.g. 0x01010101 satisfies isSplat(8).
+  /// \param SplatSizeInBits The size of the pattern in bits. Must divide bit
+  /// width without remainder.
+  bool isSplat(unsigned SplatSizeInBits) const;
+
+  /// \returns true if this APInt value is a sequence of \p numBits ones
+  /// starting at the least significant bit with the remainder zero.
+  bool isMask(unsigned numBits) const {
+    assert(numBits != 0 && "numBits must be non-zero");
+    assert(numBits <= BitWidth && "numBits out of range");
+    if (isSingleWord())
+      return U.VAL == (WORD_MAX >> (APINT_BITS_PER_WORD - numBits));
+    unsigned Ones = countTrailingOnesSlowCase();
+    return (numBits == Ones) &&
+           ((Ones + countLeadingZerosSlowCase()) == BitWidth);
+  }
+
+  /// \returns true if this APInt is a non-empty sequence of ones starting at
+  /// the least significant bit with the remainder zero.
+  /// Ex. isMask(0x0000FFFFU) == true.
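+  ///
+  /// A further illustration:
+  /// \code
+  ///   APInt(32, 0x0000FFFF).isMask();  // true: 16 low ones, rest zero
+  ///   APInt(32, 0x00FF0000).isMask();  // false: ones do not start at bit 0
+  /// \endcode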
+  bool isMask() const {
+    if (isSingleWord())
+      return isMask_64(U.VAL);
+    unsigned Ones = countTrailingOnesSlowCase();
+    return (Ones > 0) && ((Ones + countLeadingZerosSlowCase()) == BitWidth);
+  }
+
+  /// \brief Return true if this APInt value contains a sequence of ones with
+  /// the remainder zero.
+  bool isShiftedMask() const {
+    if (isSingleWord())
+      return isShiftedMask_64(U.VAL);
+    unsigned Ones = countPopulationSlowCase();
+    unsigned LeadZ = countLeadingZerosSlowCase();
+    return (Ones + LeadZ + countTrailingZeros()) == BitWidth;
+  }
+
+  /// @}
+  /// \name Value Generators
+  /// @{
+
+  /// \brief Gets maximum unsigned value of APInt for specific bit width.
+  static APInt getMaxValue(unsigned numBits) {
+    return getAllOnesValue(numBits);
+  }
+
+  /// \brief Gets maximum signed value of APInt for a specific bit width.
+  static APInt getSignedMaxValue(unsigned numBits) {
+    APInt API = getAllOnesValue(numBits);
+    API.clearBit(numBits - 1);
+    return API;
+  }
+
+  /// \brief Gets minimum unsigned value of APInt for a specific bit width.
+  static APInt getMinValue(unsigned numBits) { return APInt(numBits, 0); }
+
+  /// \brief Gets minimum signed value of APInt for a specific bit width.
+  static APInt getSignedMinValue(unsigned numBits) {
+    APInt API(numBits, 0);
+    API.setBit(numBits - 1);
+    return API;
+  }
+
+  /// \brief Get the SignMask for a specific bit width.
+  ///
+  /// This is just a wrapper function of getSignedMinValue(), and it helps code
+  /// readability when we want to get a SignMask.
+  static APInt getSignMask(unsigned BitWidth) {
+    return getSignedMinValue(BitWidth);
+  }
+
+  /// \brief Get the all-ones value.
+  ///
+  /// \returns the all-ones value for an APInt of the specified bit-width.
+  static APInt getAllOnesValue(unsigned numBits) {
+    return APInt(numBits, WORD_MAX, true);
+  }
+
+  /// \brief Get the '0' value.
+  ///
+  /// \returns the '0' value for an APInt of the specified bit-width.
+  static APInt getNullValue(unsigned numBits) { return APInt(numBits, 0); }
+
+  /// \brief Compute an APInt containing numBits highbits from this APInt.
+  ///
+  /// Get an APInt with the same BitWidth as this APInt, just zero mask
+  /// the low bits and right shift to the least significant bit.
+  ///
+  /// \returns the high "numBits" bits of this APInt.
+  APInt getHiBits(unsigned numBits) const;
+
+  /// \brief Compute an APInt containing numBits lowbits from this APInt.
+  ///
+  /// Get an APInt with the same BitWidth as this APInt, just zero mask
+  /// the high bits.
+  ///
+  /// \returns the low "numBits" bits of this APInt.
+  APInt getLoBits(unsigned numBits) const;
+
+  /// \brief Return an APInt with exactly one bit set in the result.
+  static APInt getOneBitSet(unsigned numBits, unsigned BitNo) {
+    APInt Res(numBits, 0);
+    Res.setBit(BitNo);
+    return Res;
+  }
+
+  /// \brief Get a value with a block of bits set.
+  ///
+  /// Constructs an APInt value that has a contiguous range of bits set. The
+  /// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other
+  /// bits will be zero. For example, with parameters (32, 0, 16) you would get
+  /// 0x0000FFFF. Note that loBit must not exceed hiBit; the underlying setBits
+  /// call asserts loBit <= hiBit, so "wrapped" ranges are not supported.
+  ///
+  /// \param numBits the intended bit width of the result
+  /// \param loBit the index of the lowest bit set.
+  /// \param hiBit the index of the highest bit set.
+  ///
+  /// \returns An APInt value with the requested bits set.
+  static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) {
+    APInt Res(numBits, 0);
+    Res.setBits(loBit, hiBit);
+    return Res;
+  }
+
+  /// \brief Get a value with upper bits starting at loBit set.
+  ///
+  /// Constructs an APInt value that has a contiguous range of bits set. The
+  /// bits from loBit (inclusive) to numBits (exclusive) will be set. All other
+  /// bits will be zero. For example, with parameters (32, 12) you would get
+  /// 0xFFFFF000.
+  ///
+  /// \param numBits the intended bit width of the result
+  /// \param loBit the index of the lowest bit to set.
+  ///
+  /// \returns An APInt value with the requested bits set.
+  static APInt getBitsSetFrom(unsigned numBits, unsigned loBit) {
+    APInt Res(numBits, 0);
+    Res.setBitsFrom(loBit);
+    return Res;
+  }
+
+  /// \brief Get a value with high bits set
+  ///
+  /// Constructs an APInt value that has the top hiBitsSet bits set.
+  ///
+  /// \param numBits the bitwidth of the result
+  /// \param hiBitsSet the number of high-order bits set in the result.
+  static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet) {
+    APInt Res(numBits, 0);
+    Res.setHighBits(hiBitsSet);
+    return Res;
+  }
+
+  /// \brief Get a value with low bits set
+  ///
+  /// Constructs an APInt value that has the bottom loBitsSet bits set.
+  ///
+  /// \param numBits the bitwidth of the result
+  /// \param loBitsSet the number of low-order bits set in the result.
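+  ///
+  /// For example (getHighBitsSet above is the mirror image):
+  /// \code
+  ///   APInt::getLowBitsSet(32, 8);   // 0x000000FF
+  ///   APInt::getHighBitsSet(32, 8);  // 0xFF000000
+  /// \endcode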
+  static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet) {
+    APInt Res(numBits, 0);
+    Res.setLowBits(loBitsSet);
+    return Res;
+  }
+
+  /// \brief Return a value containing V broadcasted over NewLen bits.
+  static APInt getSplat(unsigned NewLen, const APInt &V);
+
+  /// \brief Determine if two APInts have the same value, after zero-extending
+  /// one of them (if needed!) to ensure that the bit-widths match.
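+  ///
+  /// For instance:
+  /// \code
+  ///   APInt::isSameValue(APInt(16, 42), APInt(32, 42));             // true
+  ///   APInt::isSameValue(APInt(16, -1, true), APInt(32, 0xFFFF));  // true
+  /// \endcode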
+  static bool isSameValue(const APInt &I1, const APInt &I2) {
+    if (I1.getBitWidth() == I2.getBitWidth())
+      return I1 == I2;
+
+    if (I1.getBitWidth() > I2.getBitWidth())
+      return I1 == I2.zext(I1.getBitWidth());
+
+    return I1.zext(I2.getBitWidth()) == I2;
+  }
+
+  /// \brief Overload to compute a hash_code for an APInt value.
+  friend hash_code hash_value(const APInt &Arg);
+
+  /// This function returns a pointer to the internal storage of the APInt.
+  /// This is useful for writing out the APInt in binary form without any
+  /// conversions.
+  const uint64_t *getRawData() const {
+    if (isSingleWord())
+      return &U.VAL;
+    return &U.pVal[0];
+  }
+
+  /// @}
+  /// \name Unary Operators
+  /// @{
+
+  /// \brief Postfix increment operator.
+  ///
+  /// Increments *this by 1.
+  ///
+  /// \returns a new APInt value representing the original value of *this.
+  const APInt operator++(int) {
+    APInt API(*this);
+    ++(*this);
+    return API;
+  }
+
+  /// \brief Prefix increment operator.
+  ///
+  /// \returns *this incremented by one
+  APInt &operator++();
+
+  /// \brief Postfix decrement operator.
+  ///
+  /// Decrements *this by 1.
+  ///
+  /// \returns a new APInt value representing the original value of *this.
+  const APInt operator--(int) {
+    APInt API(*this);
+    --(*this);
+    return API;
+  }
+
+  /// \brief Prefix decrement operator.
+  ///
+  /// \returns *this decremented by one.
+  APInt &operator--();
+
+  /// \brief Logical negation operator.
+  ///
+  /// Performs logical negation operation on this APInt.
+  ///
+  /// \returns true if *this is zero, false otherwise.
+  bool operator!() const {
+    if (isSingleWord())
+      return U.VAL == 0;
+    return countLeadingZerosSlowCase() == BitWidth;
+  }
+
+  /// @}
+  /// \name Assignment Operators
+  /// @{
+
+  /// \brief Copy assignment operator.
+  ///
+  /// \returns *this after assignment of RHS.
+  APInt &operator=(const APInt &RHS) {
+    // If the bitwidths are the same, we can avoid mucking with memory
+    if (isSingleWord() && RHS.isSingleWord()) {
+      U.VAL = RHS.U.VAL;
+      BitWidth = RHS.BitWidth;
+      return clearUnusedBits();
+    }
+
+    AssignSlowCase(RHS);
+    return *this;
+  }
+
+  /// @brief Move assignment operator.
+  APInt &operator=(APInt &&that) {
+    assert(this != &that && "Self-move not supported");
+    if (!isSingleWord())
+      delete[] U.pVal;
+
+    // Use memcpy so that type based alias analysis sees both VAL and pVal
+    // as modified.
+    memcpy(&U, &that.U, sizeof(U));
+
+    BitWidth = that.BitWidth;
+    that.BitWidth = 0;
+
+    return *this;
+  }
+
+  /// \brief Assignment operator.
+  ///
+  /// The RHS value is assigned to *this. If the significant bits in RHS exceed
+  /// the bit width, the excess bits are truncated. If the bit width is larger
+  /// than 64, the value is zero filled in the unspecified high order bits.
+  ///
+  /// \returns *this after assignment of RHS value.
+  APInt &operator=(uint64_t RHS) {
+    if (isSingleWord()) {
+      U.VAL = RHS;
+      clearUnusedBits();
+    } else {
+      U.pVal[0] = RHS;
+      memset(U.pVal+1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
+    }
+    return *this;
+  }
+
+  /// \brief Bitwise AND assignment operator.
+  ///
+  /// Performs a bitwise AND operation on this APInt and RHS. The result is
+  /// assigned to *this.
+  ///
+  /// \returns *this after ANDing with RHS.
+  APInt &operator&=(const APInt &RHS) {
+    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+    if (isSingleWord())
+      U.VAL &= RHS.U.VAL;
+    else
+      AndAssignSlowCase(RHS);
+    return *this;
+  }
+
+  /// \brief Bitwise AND assignment operator.
+  ///
+  /// Performs a bitwise AND operation on this APInt and RHS. RHS is
+  /// logically zero-extended or truncated to match the bit-width of
+  /// the LHS.
+  APInt &operator&=(uint64_t RHS) {
+    if (isSingleWord()) {
+      U.VAL &= RHS;
+      return *this;
+    }
+    U.pVal[0] &= RHS;
+    memset(U.pVal+1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
+    return *this;
+  }
+
+  /// \brief Bitwise OR assignment operator.
+  ///
+  /// Performs a bitwise OR operation on this APInt and RHS. The result is
+  /// assigned to *this.
+  ///
+  /// \returns *this after ORing with RHS.
+  APInt &operator|=(const APInt &RHS) {
+    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+    if (isSingleWord())
+      U.VAL |= RHS.U.VAL;
+    else
+      OrAssignSlowCase(RHS);
+    return *this;
+  }
+
+  /// \brief Bitwise OR assignment operator.
+  ///
+  /// Performs a bitwise OR operation on this APInt and RHS. RHS is
+  /// logically zero-extended or truncated to match the bit-width of
+  /// the LHS.
+  APInt &operator|=(uint64_t RHS) {
+    if (isSingleWord()) {
+      U.VAL |= RHS;
+      clearUnusedBits();
+    } else {
+      U.pVal[0] |= RHS;
+    }
+    return *this;
+  }
+
+  /// \brief Bitwise XOR assignment operator.
+  ///
+  /// Performs a bitwise XOR operation on this APInt and RHS. The result is
+  /// assigned to *this.
+  ///
+  /// \returns *this after XORing with RHS.
+  APInt &operator^=(const APInt &RHS) {
+    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+    if (isSingleWord())
+      U.VAL ^= RHS.U.VAL;
+    else
+      XorAssignSlowCase(RHS);
+    return *this;
+  }
+
+  /// \brief Bitwise XOR assignment operator.
+  ///
+  /// Performs a bitwise XOR operation on this APInt and RHS. RHS is
+  /// logically zero-extended or truncated to match the bit-width of
+  /// the LHS.
+  APInt &operator^=(uint64_t RHS) {
+    if (isSingleWord()) {
+      U.VAL ^= RHS;
+      clearUnusedBits();
+    } else {
+      U.pVal[0] ^= RHS;
+    }
+    return *this;
+  }
+
+  /// \brief Multiplication assignment operator.
+  ///
+  /// Multiplies this APInt by RHS and assigns the result to *this.
+  ///
+  /// \returns *this
+  APInt &operator*=(const APInt &RHS);
+  APInt &operator*=(uint64_t RHS);
+
+  /// \brief Addition assignment operator.
+  ///
+  /// Adds RHS to *this and assigns the result to *this.
+  ///
+  /// \returns *this
+  APInt &operator+=(const APInt &RHS);
+  APInt &operator+=(uint64_t RHS);
+
+  /// \brief Subtraction assignment operator.
+  ///
+  /// Subtracts RHS from *this and assigns the result to *this.
+  ///
+  /// \returns *this
+  APInt &operator-=(const APInt &RHS);
+  APInt &operator-=(uint64_t RHS);
+
+  /// \brief Left-shift assignment function.
+  ///
+  /// Shifts *this left by shiftAmt and assigns the result to *this.
+  ///
+  /// \returns *this after shifting left by ShiftAmt
+  APInt &operator<<=(unsigned ShiftAmt) {
+    assert(ShiftAmt <= BitWidth && "Invalid shift amount");
+    if (isSingleWord()) {
+      if (ShiftAmt == BitWidth)
+        U.VAL = 0;
+      else
+        U.VAL <<= ShiftAmt;
+      return clearUnusedBits();
+    }
+    shlSlowCase(ShiftAmt);
+    return *this;
+  }
+
+  /// \brief Left-shift assignment function.
+  ///
+  /// Shifts *this left by shiftAmt and assigns the result to *this.
+  ///
+  /// \returns *this after shifting left by ShiftAmt
+  APInt &operator<<=(const APInt &ShiftAmt);
+
+  /// @}
+  /// \name Binary Operators
+  /// @{
+
+  /// \brief Multiplication operator.
+  ///
+  /// Multiplies this APInt by RHS and returns the result.
+  APInt operator*(const APInt &RHS) const;
+
+  /// \brief Left logical shift operator.
+  ///
+  /// Shifts this APInt left by \p Bits and returns the result.
+  APInt operator<<(unsigned Bits) const { return shl(Bits); }
+
+  /// \brief Left logical shift operator.
+  ///
+  /// Shifts this APInt left by \p Bits and returns the result.
+  APInt operator<<(const APInt &Bits) const { return shl(Bits); }
+
+  /// \brief Arithmetic right-shift function.
+  ///
+  /// Arithmetic right-shift this APInt by shiftAmt.
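+  ///
+  /// Contrast with lshr, which zero-fills:
+  /// \code
+  ///   APInt(8, 0xF0).ashr(4);  // 0xFF: the set sign bit is replicated
+  ///   APInt(8, 0xF0).lshr(4);  // 0x0F: zeros shifted in
+  /// \endcode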
+  APInt ashr(unsigned ShiftAmt) const {
+    APInt R(*this);
+    R.ashrInPlace(ShiftAmt);
+    return R;
+  }
+
+  /// Arithmetic right-shift this APInt by ShiftAmt in place.
+  void ashrInPlace(unsigned ShiftAmt) {
+    assert(ShiftAmt <= BitWidth && "Invalid shift amount");
+    if (isSingleWord()) {
+      int64_t SExtVAL = SignExtend64(U.VAL, BitWidth);
+      if (ShiftAmt == BitWidth)
+        U.VAL = SExtVAL >> (APINT_BITS_PER_WORD - 1); // Fill with sign bit.
+      else
+        U.VAL = SExtVAL >> ShiftAmt;
+      clearUnusedBits();
+      return;
+    }
+    ashrSlowCase(ShiftAmt);
+  }
+
+  /// \brief Logical right-shift function.
+  ///
+  /// Logical right-shift this APInt by shiftAmt.
+  APInt lshr(unsigned shiftAmt) const {
+    APInt R(*this);
+    R.lshrInPlace(shiftAmt);
+    return R;
+  }
+
+  /// Logical right-shift this APInt by ShiftAmt in place.
+  void lshrInPlace(unsigned ShiftAmt) {
+    assert(ShiftAmt <= BitWidth && "Invalid shift amount");
+    if (isSingleWord()) {
+      if (ShiftAmt == BitWidth)
+        U.VAL = 0;
+      else
+        U.VAL >>= ShiftAmt;
+      return;
+    }
+    lshrSlowCase(ShiftAmt);
+  }
+
+  /// \brief Left-shift function.
+  ///
+  /// Left-shift this APInt by shiftAmt.
+  APInt shl(unsigned shiftAmt) const {
+    APInt R(*this);
+    R <<= shiftAmt;
+    return R;
+  }
+
+  /// \brief Rotate left by rotateAmt.
+  APInt rotl(unsigned rotateAmt) const;
+
+  /// \brief Rotate right by rotateAmt.
+  APInt rotr(unsigned rotateAmt) const;
+
+  /// \brief Arithmetic right-shift function.
+  ///
+  /// Arithmetic right-shift this APInt by shiftAmt.
+  APInt ashr(const APInt &ShiftAmt) const {
+    APInt R(*this);
+    R.ashrInPlace(ShiftAmt);
+    return R;
+  }
+
+  /// Arithmetic right-shift this APInt by shiftAmt in place.
+  void ashrInPlace(const APInt &shiftAmt);
+
+  /// \brief Logical right-shift function.
+  ///
+  /// Logical right-shift this APInt by shiftAmt.
+  APInt lshr(const APInt &ShiftAmt) const {
+    APInt R(*this);
+    R.lshrInPlace(ShiftAmt);
+    return R;
+  }
+
+  /// Logical right-shift this APInt by ShiftAmt in place.
+  void lshrInPlace(const APInt &ShiftAmt);
+
+  /// \brief Left-shift function.
+  ///
+  /// Left-shift this APInt by shiftAmt.
+  APInt shl(const APInt &ShiftAmt) const {
+    APInt R(*this);
+    R <<= ShiftAmt;
+    return R;
+  }
+
+  /// \brief Rotate left by rotateAmt.
+  APInt rotl(const APInt &rotateAmt) const;
+
+  /// \brief Rotate right by rotateAmt.
+  APInt rotr(const APInt &rotateAmt) const;
+
+  /// \brief Unsigned division operation.
+  ///
+  /// Perform an unsigned divide operation on this APInt by RHS. Both this and
+  /// RHS are treated as unsigned quantities for purposes of this division.
+  ///
+  /// \returns a new APInt value containing the division result
+  APInt udiv(const APInt &RHS) const;
+  APInt udiv(uint64_t RHS) const;
+
+  /// \brief Signed division function for APInt.
+  ///
+  /// Signed divide this APInt by APInt RHS.
+  APInt sdiv(const APInt &RHS) const;
+  APInt sdiv(int64_t RHS) const;
+
+  /// \brief Unsigned remainder operation.
+  ///
+  /// Perform an unsigned remainder operation on this APInt with RHS being the
+  /// divisor. Both this and RHS are treated as unsigned quantities for purposes
+  /// of this operation. Note that this is a true remainder operation and not a
+  /// modulo operation because the sign follows the sign of the dividend which
+  /// is *this.
+  ///
+  /// \returns a new APInt value containing the remainder result
+  APInt urem(const APInt &RHS) const;
+  uint64_t urem(uint64_t RHS) const;
+
+  /// \brief Function for signed remainder operation.
+  ///
+  /// Signed remainder operation on APInt.
+  APInt srem(const APInt &RHS) const;
+  int64_t srem(int64_t RHS) const;
+
+  /// \brief Dual division/remainder interface.
+  ///
+  /// Sometimes it is convenient to divide two APInt values and obtain both the
+  /// quotient and remainder. This function does both operations in the same
+  /// computation making it a little more efficient. The pair of input arguments
+  /// may overlap with the pair of output arguments. It is safe to call
+  /// udivrem(X, Y, X, Y), for example.
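+  ///
+  /// A minimal sketch:
+  /// \code
+  ///   APInt Q(32, 0), R(32, 0);
+  ///   APInt::udivrem(APInt(32, 17), APInt(32, 5), Q, R);  // Q == 3, R == 2
+  /// \endcode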
+  static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
+                      APInt &Remainder);
+  static void udivrem(const APInt &LHS, uint64_t RHS, APInt &Quotient,
+                      uint64_t &Remainder);
+
+  static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
+                      APInt &Remainder);
+  static void sdivrem(const APInt &LHS, int64_t RHS, APInt &Quotient,
+                      int64_t &Remainder);
+
+  // Operations that return overflow indicators.
+  APInt sadd_ov(const APInt &RHS, bool &Overflow) const;
+  APInt uadd_ov(const APInt &RHS, bool &Overflow) const;
+  APInt ssub_ov(const APInt &RHS, bool &Overflow) const;
+  APInt usub_ov(const APInt &RHS, bool &Overflow) const;
+  APInt sdiv_ov(const APInt &RHS, bool &Overflow) const;
+  APInt smul_ov(const APInt &RHS, bool &Overflow) const;
+  APInt umul_ov(const APInt &RHS, bool &Overflow) const;
+  APInt sshl_ov(const APInt &Amt, bool &Overflow) const;
+  APInt ushl_ov(const APInt &Amt, bool &Overflow) const;
+
+  /// \brief Array-indexing support.
+  ///
+  /// \returns the bit value at bitPosition
+  bool operator[](unsigned bitPosition) const {
+    assert(bitPosition < getBitWidth() && "Bit position out of bounds!");
+    return (maskBit(bitPosition) & getWord(bitPosition)) != 0;
+  }
+
+  /// @}
+  /// \name Comparison Operators
+  /// @{
+
+  /// \brief Equality operator.
+  ///
+  /// Compares this APInt with RHS for the validity of the equality
+  /// relationship.
+  bool operator==(const APInt &RHS) const {
+    assert(BitWidth == RHS.BitWidth && "Comparison requires equal bit widths");
+    if (isSingleWord())
+      return U.VAL == RHS.U.VAL;
+    return EqualSlowCase(RHS);
+  }
+
+  /// \brief Equality operator.
+  ///
+  /// Compares this APInt with a uint64_t for the validity of the equality
+  /// relationship.
+  ///
+  /// \returns true if *this == Val
+  bool operator==(uint64_t Val) const {
+    return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() == Val;
+  }
+
+  /// \brief Equality comparison.
+  ///
+  /// Compares this APInt with RHS for the validity of the equality
+  /// relationship.
+  ///
+  /// \returns true if *this == Val
+  bool eq(const APInt &RHS) const { return (*this) == RHS; }
+
+  /// \brief Inequality operator.
+  ///
+  /// Compares this APInt with RHS for the validity of the inequality
+  /// relationship.
+  ///
+  /// \returns true if *this != Val
+  bool operator!=(const APInt &RHS) const { return !((*this) == RHS); }
+
+  /// \brief Inequality operator.
+  ///
+  /// Compares this APInt with a uint64_t for the validity of the inequality
+  /// relationship.
+  ///
+  /// \returns true if *this != Val
+  bool operator!=(uint64_t Val) const { return !((*this) == Val); }
+
+  /// \brief Inequality comparison
+  ///
+  /// Compares this APInt with RHS for the validity of the inequality
+  /// relationship.
+  ///
+  /// \returns true if *this != Val
+  bool ne(const APInt &RHS) const { return !((*this) == RHS); }
+
+  /// \brief Unsigned less than comparison
+  ///
+  /// Regards both *this and RHS as unsigned quantities and compares them for
+  /// the validity of the less-than relationship.
+  ///
+  /// \returns true if *this < RHS when both are considered unsigned.
+  bool ult(const APInt &RHS) const { return compare(RHS) < 0; }
+
+  /// \brief Unsigned less than comparison
+  ///
+  /// Regards *this as an unsigned quantity and compares it with RHS for
+  /// the validity of the less-than relationship.
+  ///
+  /// \returns true if *this < RHS when considered unsigned.
+  bool ult(uint64_t RHS) const {
+    // Only need to check active bits if not a single word.
+    return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() < RHS;
+  }
+
+  /// \brief Signed less than comparison
+  ///
+  /// Regards both *this and RHS as signed quantities and compares them for
+  /// validity of the less-than relationship.
+  ///
+  /// \returns true if *this < RHS when both are considered signed.
+  bool slt(const APInt &RHS) const { return compareSigned(RHS) < 0; }
+
+  /// \brief Signed less than comparison
+  ///
+  /// Regards *this as a signed quantity and compares it with RHS for
+  /// the validity of the less-than relationship.
+  ///
+  /// \returns true if *this < RHS when considered signed.
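+  ///
+  /// Note the contrast with the unsigned predicates:
+  /// \code
+  ///   APInt V(8, 0xFF);  // -1 when viewed as a signed 8-bit value
+  ///   V.slt(0);          // true
+  ///   V.ult(1);          // false: 255 as an unsigned quantity
+  /// \endcode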
+  bool slt(int64_t RHS) const {
+    return (!isSingleWord() && getMinSignedBits() > 64) ? isNegative()
+                                                        : getSExtValue() < RHS;
+  }
+
+  /// \brief Unsigned less or equal comparison
+  ///
+  /// Regards both *this and RHS as unsigned quantities and compares them for
+  /// validity of the less-or-equal relationship.
+  ///
+  /// \returns true if *this <= RHS when both are considered unsigned.
+  bool ule(const APInt &RHS) const { return compare(RHS) <= 0; }
+
+  /// \brief Unsigned less or equal comparison
+  ///
+  /// Regards *this as an unsigned quantity and compares it with RHS for
+  /// the validity of the less-or-equal relationship.
+  ///
+  /// \returns true if *this <= RHS when considered unsigned.
+  bool ule(uint64_t RHS) const { return !ugt(RHS); }
+
+  /// \brief Signed less or equal comparison
+  ///
+  /// Regards both *this and RHS as signed quantities and compares them for
+  /// validity of the less-or-equal relationship.
+  ///
+  /// \returns true if *this <= RHS when both are considered signed.
+  bool sle(const APInt &RHS) const { return compareSigned(RHS) <= 0; }
+
+  /// \brief Signed less or equal comparison
+  ///
+  /// Regards *this as a signed quantity and compares it with RHS for the
+  /// validity of the less-or-equal relationship.
+  ///
+  /// \returns true if *this <= RHS when considered signed.
+  bool sle(uint64_t RHS) const { return !sgt(RHS); }
+
+  /// \brief Unsigned greater than comparison
+  ///
+  /// Regards both *this and RHS as unsigned quantities and compares them for
+  /// the validity of the greater-than relationship.
+  ///
+  /// \returns true if *this > RHS when both are considered unsigned.
+  bool ugt(const APInt &RHS) const { return !ule(RHS); }
+
+  /// \brief Unsigned greater than comparison
+  ///
+  /// Regards *this as an unsigned quantity and compares it with RHS for
+  /// the validity of the greater-than relationship.
+  ///
+  /// \returns true if *this > RHS when considered unsigned.
+  bool ugt(uint64_t RHS) const {
+    // Only need to check active bits if not a single word.
+    return (!isSingleWord() && getActiveBits() > 64) || getZExtValue() > RHS;
+  }
+
+  /// \brief Signed greater than comparison
+  ///
+  /// Regards both *this and RHS as signed quantities and compares them for the
+  /// validity of the greater-than relationship.
+  ///
+  /// \returns true if *this > RHS when both are considered signed.
+  bool sgt(const APInt &RHS) const { return !sle(RHS); }
+
+  /// \brief Signed greater than comparison
+  ///
+  /// Regards *this as a signed quantity and compares it with RHS for
+  /// the validity of the greater-than relationship.
+  ///
+  /// \returns true if *this > RHS when considered signed.
+  bool sgt(int64_t RHS) const {
+    return (!isSingleWord() && getMinSignedBits() > 64) ? !isNegative()
+                                                        : getSExtValue() > RHS;
+  }
+
+  /// \brief Unsigned greater or equal comparison
+  ///
+  /// Regards both *this and RHS as unsigned quantities and compares them for
+  /// validity of the greater-or-equal relationship.
+  ///
+  /// \returns true if *this >= RHS when both are considered unsigned.
+  bool uge(const APInt &RHS) const { return !ult(RHS); }
+
+  /// \brief Unsigned greater or equal comparison
+  ///
+  /// Regards *this as an unsigned quantity and compares it with RHS for
+  /// the validity of the greater-or-equal relationship.
+  ///
+  /// \returns true if *this >= RHS when considered unsigned.
+  bool uge(uint64_t RHS) const { return !ult(RHS); }
+
+  /// \brief Signed greater or equal comparison
+  ///
+  /// Regards both *this and RHS as signed quantities and compares them for
+  /// validity of the greater-or-equal relationship.
+  ///
+  /// \returns true if *this >= RHS when both are considered signed.
+  bool sge(const APInt &RHS) const { return !slt(RHS); }
+
+  /// \brief Signed greater or equal comparison
+  ///
+  /// Regards *this as a signed quantity and compares it with RHS for
+  /// the validity of the greater-or-equal relationship.
+  ///
+  /// \returns true if *this >= RHS when considered signed.
+  bool sge(int64_t RHS) const { return !slt(RHS); }
+
+  /// This operation tests if there are any pairs of corresponding bits
+  /// between this APInt and RHS that are both set.
+  bool intersects(const APInt &RHS) const {
+    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+    if (isSingleWord())
+      return (U.VAL & RHS.U.VAL) != 0;
+    return intersectsSlowCase(RHS);
+  }
+
+  /// This operation checks that all bits set in this APInt are also set in RHS.
+  bool isSubsetOf(const APInt &RHS) const {
+    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+    if (isSingleWord())
+      return (U.VAL & ~RHS.U.VAL) == 0;
+    return isSubsetOfSlowCase(RHS);
+  }
+
+  /// @}
+  /// \name Resizing Operators
+  /// @{
+
+  /// \brief Truncate to new width.
+  ///
+  /// Truncate the APInt to a specified width. It is an error to specify a width
+  /// that is greater than or equal to the current width.
+  APInt trunc(unsigned width) const;
+
+  /// \brief Sign extend to a new width.
+  ///
+  /// This operation sign extends the APInt to a new width. If the high order
+  /// bit is set, the fill on the left will be done with 1 bits, otherwise zero.
+  /// It is an error to specify a width that is less than or equal to the
+  /// current width.
+  APInt sext(unsigned width) const;
+
+  /// \brief Zero extend to a new width.
+  ///
+  /// This operation zero extends the APInt to a new width. The high order bits
+  /// are filled with 0 bits.  It is an error to specify a width that is less
+  /// than or equal to the current width.
+  APInt zext(unsigned width) const;
+
+  /// \brief Sign extend or truncate to width
+  ///
+  /// Make this APInt have the bit width given by \p width. The value is sign
+  /// extended, truncated, or left alone to make it that width.
+  APInt sextOrTrunc(unsigned width) const;
+
+  /// \brief Zero extend or truncate to width
+  ///
+  /// Make this APInt have the bit width given by \p width. The value is zero
+  /// extended, truncated, or left alone to make it that width.
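+  ///
+  /// For instance:
+  /// \code
+  ///   APInt V(16, 0xBEEF);
+  ///   V.zextOrTrunc(32);  // 0x0000BEEF
+  ///   V.zextOrTrunc(8);   // 0xEF: truncated
+  /// \endcode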
+  APInt zextOrTrunc(unsigned width) const;
+
+  /// \brief Sign extend or truncate to width
+  ///
+  /// Make this APInt have the bit width given by \p width. The value is sign
+  /// extended, or left alone to make it that width.
+  APInt sextOrSelf(unsigned width) const;
+
+  /// \brief Zero extend or truncate to width
+  ///
+  /// Make this APInt have the bit width given by \p width. The value is zero
+  /// extended, or left alone to make it that width.
+  APInt zextOrSelf(unsigned width) const;
+
+  /// @}
+  /// \name Bit Manipulation Operators
+  /// @{
+
+  /// \brief Set every bit to 1.
+  void setAllBits() {
+    if (isSingleWord())
+      U.VAL = WORD_MAX;
+    else
+      // Set all the bits in all the words.
+      memset(U.pVal, -1, getNumWords() * APINT_WORD_SIZE);
+    // Clear the unused ones
+    clearUnusedBits();
+  }
+
+  /// \brief Set a given bit to 1.
+  ///
+  /// Set the bit at the position given by "BitPosition" to 1.
+  void setBit(unsigned BitPosition) {
+    assert(BitPosition < BitWidth && "BitPosition out of range");
+    WordType Mask = maskBit(BitPosition);
+    if (isSingleWord())
+      U.VAL |= Mask;
+    else
+      U.pVal[whichWord(BitPosition)] |= Mask;
+  }
+
+  /// Set the sign bit to 1.
+  void setSignBit() {
+    setBit(BitWidth - 1);
+  }
+
+  /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
+  void setBits(unsigned loBit, unsigned hiBit) {
+    assert(hiBit <= BitWidth && "hiBit out of range");
+    assert(loBit <= BitWidth && "loBit out of range");
+    assert(loBit <= hiBit && "loBit greater than hiBit");
+    if (loBit == hiBit)
+      return;
+    if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) {
+      uint64_t mask = WORD_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
+      mask <<= loBit;
+      if (isSingleWord())
+        U.VAL |= mask;
+      else
+        U.pVal[0] |= mask;
+    } else {
+      setBitsSlowCase(loBit, hiBit);
+    }
+  }
+
+  /// Set the top bits starting from loBit.
+  void setBitsFrom(unsigned loBit) {
+    return setBits(loBit, BitWidth);
+  }
+
+  /// Set the bottom loBits bits.
+  void setLowBits(unsigned loBits) {
+    return setBits(0, loBits);
+  }
+
+  /// Set the top hiBits bits.
+  void setHighBits(unsigned hiBits) {
+    return setBits(BitWidth - hiBits, BitWidth);
+  }
+
+  /// \brief Set every bit to 0.
+  void clearAllBits() {
+    if (isSingleWord())
+      U.VAL = 0;
+    else
+      memset(U.pVal, 0, getNumWords() * APINT_WORD_SIZE);
+  }
+
+  /// \brief Set a given bit to 0.
+  ///
+  /// Set the bit at the position given by "BitPosition" to 0.
+  void clearBit(unsigned BitPosition) {
+    assert(BitPosition < BitWidth && "BitPosition out of range");
+    WordType Mask = ~maskBit(BitPosition);
+    if (isSingleWord())
+      U.VAL &= Mask;
+    else
+      U.pVal[whichWord(BitPosition)] &= Mask;
+  }
+
+  /// Set the sign bit to 0.
+  void clearSignBit() {
+    clearBit(BitWidth - 1);
+  }
+
+  /// \brief Toggle every bit to its opposite value.
+  void flipAllBits() {
+    if (isSingleWord()) {
+      U.VAL ^= WORD_MAX;
+      clearUnusedBits();
+    } else {
+      flipAllBitsSlowCase();
+    }
+  }
+
+  /// \brief Toggles a given bit to its opposite value.
+  ///
+  /// Toggle the bit at the position given by "bitPosition" to its opposite
+  /// value.
+  void flipBit(unsigned bitPosition);
+
+  /// Negate this APInt in place.
+  void negate() {
+    flipAllBits();
+    ++(*this);
+  }
+
+  /// Insert the bits from a smaller APInt starting at bitPosition.
+  void insertBits(const APInt &SubBits, unsigned bitPosition);
+
+  /// Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
+  APInt extractBits(unsigned numBits, unsigned bitPosition) const;
+
+  /// @}
+  /// \name Value Characterization Functions
+  /// @{
+
+  /// \brief Return the number of bits in the APInt.
+  unsigned getBitWidth() const { return BitWidth; }
+
+  /// \brief Get the number of words.
+  ///
+  /// Here one word's bit width equals that of uint64_t.
+  ///
+  /// \returns the number of words to hold the integer value of this APInt.
+  unsigned getNumWords() const { return getNumWords(BitWidth); }
+
+  /// \brief Get the number of words.
+  ///
+  /// *NOTE* Here one word's bit width equals that of uint64_t.
+  ///
+  /// \returns the number of words to hold the integer value with a given bit
+  /// width.
+  static unsigned getNumWords(unsigned BitWidth) {
+    return ((uint64_t)BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD;
+  }
+
+  /// \brief Compute the number of active bits in the value
+  ///
+  /// This function returns the number of active bits which is defined as the
+  /// bit width minus the number of leading zeros. This is used in several
+  /// computations to see how "wide" the value is.
+  unsigned getActiveBits() const { return BitWidth - countLeadingZeros(); }
+
+  /// \brief Compute the number of active words in the value of this APInt.
+  ///
+  /// This is used in conjunction with getActiveData to extract the raw value of
+  /// the APInt.
+  unsigned getActiveWords() const {
+    unsigned numActiveBits = getActiveBits();
+    return numActiveBits ? whichWord(numActiveBits - 1) + 1 : 1;
+  }
+
+  /// \brief Get the minimum bit size for this signed APInt
+  ///
+  /// Computes the minimum bit width for this APInt while considering it to be a
+  /// signed (and probably negative) value. If the value is not negative, this
+  /// function returns the same value as getActiveBits()+1. Otherwise, it
+  /// returns the smallest bit width that will retain the negative value. For
+  /// example, -1 can be written as 0b1 or 0xFFFFFFFFFF. 0b1 is shorter and so
+  /// for -1, this function will always return 1.
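+  ///
+  /// Concretely:
+  /// \code
+  ///   APInt(32, -1, true).getMinSignedBits();  // 1
+  ///   APInt(32, 255).getMinSignedBits();       // 9: a leading sign bit is needed
+  /// \endcode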
+  unsigned getMinSignedBits() const {
+    if (isNegative())
+      return BitWidth - countLeadingOnes() + 1;
+    return getActiveBits() + 1;
+  }
+
+  /// \brief Get zero extended value
+  ///
+  /// This method attempts to return the value of this APInt as a zero extended
+  /// uint64_t. The bitwidth must be <= 64 or the value must fit within a
+  /// uint64_t. Otherwise an assertion will result.
+  uint64_t getZExtValue() const {
+    if (isSingleWord())
+      return U.VAL;
+    assert(getActiveBits() <= 64 && "Too many bits for uint64_t");
+    return U.pVal[0];
+  }
+
+  /// \brief Get sign extended value
+  ///
+  /// This method attempts to return the value of this APInt as a sign extended
+  /// int64_t. The bit width must be <= 64 or the value must fit within an
+  /// int64_t. Otherwise an assertion will result.
+  int64_t getSExtValue() const {
+    if (isSingleWord())
+      return SignExtend64(U.VAL, BitWidth);
+    assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
+    return int64_t(U.pVal[0]);
+  }
+
+  /// \brief Get bits required for string value.
+  ///
+  /// This method determines how many bits are required to hold the APInt
+  /// equivalent of the string given by \p str.
+  static unsigned getBitsNeeded(StringRef str, uint8_t radix);
+
+  /// \brief The APInt version of the countLeadingZeros functions in
+  ///   MathExtras.h.
+  ///
+  /// It counts the number of zeros from the most significant bit to the first
+  /// one bit.
+  ///
+  /// \returns BitWidth if the value is zero, otherwise returns the number of
+  ///   zeros from the most significant bit to the first one bit.
+  unsigned countLeadingZeros() const {
+    if (isSingleWord()) {
+      unsigned unusedBits = APINT_BITS_PER_WORD - BitWidth;
+      return llvm::countLeadingZeros(U.VAL) - unusedBits;
+    }
+    return countLeadingZerosSlowCase();
+  }
+
+  /// \brief Count the number of leading one bits.
+  ///
+  /// This function is an APInt version of the countLeadingOnes
+  /// functions in MathExtras.h. It counts the number of ones from the most
+  /// significant bit to the first zero bit.
+  ///
+  /// \returns 0 if the high order bit is not set, otherwise returns the number
+  /// of 1 bits from the most significant bit down to the first zero bit.
+  unsigned countLeadingOnes() const {
+    if (isSingleWord())
+      return llvm::countLeadingOnes(U.VAL << (APINT_BITS_PER_WORD - BitWidth));
+    return countLeadingOnesSlowCase();
+  }
+
+  /// Computes the number of leading bits of this APInt that are equal to its
+  /// sign bit.
+  unsigned getNumSignBits() const {
+    return isNegative() ? countLeadingOnes() : countLeadingZeros();
+  }
+
+  /// \brief Count the number of trailing zero bits.
+  ///
+  /// This function is an APInt version of the countTrailingZeros
+  /// functions in MathExtras.h. It counts the number of zeros from the least
+  /// significant bit to the first set bit.
+  ///
+  /// \returns BitWidth if the value is zero, otherwise returns the number of
+  /// zeros from the least significant bit to the first one bit.
+  unsigned countTrailingZeros() const {
+    if (isSingleWord())
+      return std::min(unsigned(llvm::countTrailingZeros(U.VAL)), BitWidth);
+    return countTrailingZerosSlowCase();
+  }
+
+  /// \brief Count the number of trailing one bits.
+  ///
+  /// This function is an APInt version of the countTrailingOnes
+  /// functions in MathExtras.h. It counts the number of ones from the least
+  /// significant bit to the first zero bit.
+  ///
+  /// \returns BitWidth if the value is all ones, otherwise returns the number
+  /// of ones from the least significant bit to the first zero bit.
+  unsigned countTrailingOnes() const {
+    if (isSingleWord())
+      return llvm::countTrailingOnes(U.VAL);
+    return countTrailingOnesSlowCase();
+  }
+
+  /// \brief Count the number of bits set.
+  ///
+  /// This function is an APInt version of the countPopulation functions
+  /// in MathExtras.h. It counts the number of 1 bits in the APInt value.
+  ///
+  /// \returns 0 if the value is zero, otherwise returns the number of set bits.
+  unsigned countPopulation() const {
+    if (isSingleWord())
+      return llvm::countPopulation(U.VAL);
+    return countPopulationSlowCase();
+  }
+
+  /// @}
+  /// \name Conversion Functions
+  /// @{
+  void print(raw_ostream &OS, bool isSigned) const;
+
+  /// Converts an APInt to a string and append it to Str.  Str is commonly a
+  /// SmallString.
+  void toString(SmallVectorImpl<char> &Str, unsigned Radix, bool Signed,
+                bool formatAsCLiteral = false) const;
+
+  /// Considers the APInt to be unsigned and converts it into a string in the
+  /// radix given. The radix can be 2, 8, 10, 16, or 36.
+  void toStringUnsigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
+    toString(Str, Radix, false, false);
+  }
+
+  /// Considers the APInt to be signed and converts it into a string in the
+  /// radix given. The radix can be 2, 8, 10, 16, or 36.
+  void toStringSigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
+    toString(Str, Radix, true, false);
+  }
+
+  /// \brief Return the APInt as a std::string.
+  ///
+  /// Note that this is an inefficient method.  It is better to pass in a
+  /// SmallVector/SmallString to the methods above to avoid thrashing the heap
+  /// for the string.
+  std::string toString(unsigned Radix, bool Signed) const;
+
+  /// \returns a byte-swapped representation of this APInt Value.
+  APInt byteSwap() const;
+
+  /// \returns the value with the bit representation reversed of this APInt
+  /// Value.
+  APInt reverseBits() const;
+
+  /// \brief Converts this APInt to a double value.
+  double roundToDouble(bool isSigned) const;
+
+  /// \brief Converts this unsigned APInt to a double value.
+  double roundToDouble() const { return roundToDouble(false); }
+
+  /// \brief Converts this signed APInt to a double value.
+  double signedRoundToDouble() const { return roundToDouble(true); }
+
+  /// \brief Converts APInt bits to a double
+  ///
+  /// The conversion does not do a translation from integer to double, it just
+  /// re-interprets the bits as a double. Note that it is valid to do this on
+  /// any bit width. Exactly 64 bits will be translated.
+  double bitsToDouble() const {
+    return BitsToDouble(getWord(0));
+  }
+
+  /// \brief Converts APInt bits to a float
+  ///
+  /// The conversion does not do a translation from integer to float, it just
+  /// re-interprets the bits as a float. Note that it is valid to do this on
+  /// any bit width. Exactly 32 bits will be translated.
+  float bitsToFloat() const {
+    return BitsToFloat(getWord(0));
+  }
+
+  /// \brief Converts a double to APInt bits.
+  ///
+  /// The conversion does not do a translation from double to integer, it just
+  /// re-interprets the bits of the double.
+  static APInt doubleToBits(double V) {
+    return APInt(sizeof(double) * CHAR_BIT, DoubleToBits(V));
+  }
+
+  /// \brief Converts a float to APInt bits.
+  ///
+  /// The conversion does not do a translation from float to integer, it just
+  /// re-interprets the bits of the float.
+  static APInt floatToBits(float V) {
+    return APInt(sizeof(float) * CHAR_BIT, FloatToBits(V));
+  }
+
+  /// @}
+  /// \name Mathematics Operations
+  /// @{
+
+  /// \returns the floor log base 2 of this APInt.
+  unsigned logBase2() const { return getActiveBits() - 1; }
+
+  /// \returns the ceil log base 2 of this APInt.
+  unsigned ceilLogBase2() const {
+    APInt temp(*this);
+    --temp;
+    return temp.getActiveBits();
+  }
+
+  /// \returns the nearest log base 2 of this APInt. Ties round up.
+  ///
+  /// NOTE: When we have a BitWidth of 1, we define:
+  ///
+  ///   log2(0) = UINT32_MAX
+  ///   log2(1) = 0
+  ///
+  /// to get around any mathematical concerns resulting from
+  /// referencing 2 in a space where 2 does not exist.
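+  ///
+  /// For instance:
+  /// \code
+  ///   APInt(32, 5).nearestLogBase2();  // 2: 5 is closer to 4 than to 8
+  ///   APInt(32, 6).nearestLogBase2();  // 3: equidistant, ties round up
+  /// \endcode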
+  unsigned nearestLogBase2() const {
+    // Special case when we have a bitwidth of 1. If VAL is 1, then we
+    // get 0. If VAL is 0, we get WORD_MAX which gets truncated to
+    // UINT32_MAX.
+    if (BitWidth == 1)
+      return U.VAL - 1;
+
+    // Handle the zero case.
+    if (isNullValue())
+      return UINT32_MAX;
+
+    // The non-zero case is handled by computing:
+    //
+    //   nearestLogBase2(x) = logBase2(x) + x[logBase2(x)-1].
+    //
+    // where x[i] is referring to the value of the ith bit of x.
+    unsigned lg = logBase2();
+    return lg + unsigned((*this)[lg - 1]);
+  }
+
+  /// \returns the log base 2 of this APInt if it is an exact power of two, -1
+  /// otherwise.
+  int32_t exactLogBase2() const {
+    if (!isPowerOf2())
+      return -1;
+    return logBase2();
+  }
+
+  /// \brief Compute the square root
+  APInt sqrt() const;
+
+  /// \brief Get the absolute value.
+  ///
+  /// If *this is < 0 then return -(*this), otherwise *this.
+  APInt abs() const {
+    if (isNegative())
+      return -(*this);
+    return *this;
+  }
+
+  /// \returns the multiplicative inverse for a given modulo.
+  APInt multiplicativeInverse(const APInt &modulo) const;
+
+  /// @}
+  /// \name Support for division by constant
+  /// @{
+
+  /// Calculate the magic number for signed division by a constant.
+  struct ms;
+  ms magic() const;
+
+  /// Calculate the magic number for unsigned division by a constant.
+  struct mu;
+  mu magicu(unsigned LeadingZeros = 0) const;
+
+  /// @}
+  /// \name Building-block Operations for APInt and APFloat
+  /// @{
+
+  // These building block operations operate on a representation of arbitrary
+  // precision, two's-complement, bignum integer values. They should be
+  // sufficient to implement APInt and APFloat bignum requirements. Inputs are
+  // generally a pointer to the base of an array of integer parts, representing
+  // an unsigned bignum, and a count of how many parts there are.
+
+  /// Sets the least significant part of a bignum to the input value, and zeroes
+  /// out higher parts.
+  static void tcSet(WordType *, WordType, unsigned);
+
+  /// Assign one bignum to another.
+  static void tcAssign(WordType *, const WordType *, unsigned);
+
+  /// Returns true if a bignum is zero, false otherwise.
+  static bool tcIsZero(const WordType *, unsigned);
+
+  /// Extract the given bit of a bignum; returns 0 or 1.  Zero-based.
+  static int tcExtractBit(const WordType *, unsigned bit);
+
+  /// Copy the bit vector of width srcBITS from SRC, starting at bit srcLSB, to
+  /// DST, of dstCOUNT parts, such that the bit srcLSB becomes the least
+  /// significant bit of DST.  All high bits above srcBITS in DST are
+  /// zero-filled.
+  static void tcExtract(WordType *, unsigned dstCount,
+                        const WordType *, unsigned srcBits,
+                        unsigned srcLSB);
+
+  /// Set the given bit of a bignum.  Zero-based.
+  static void tcSetBit(WordType *, unsigned bit);
+
+  /// Clear the given bit of a bignum.  Zero-based.
+  static void tcClearBit(WordType *, unsigned bit);
+
+  /// Returns the bit number of the least or most significant set bit of a
+  /// number.  If the input number has no bits set, -1U is returned.
+  static unsigned tcLSB(const WordType *, unsigned n);
+  static unsigned tcMSB(const WordType *parts, unsigned n);
+
+  /// Negate a bignum in-place.
+  static void tcNegate(WordType *, unsigned);
+
+  /// DST += RHS + CARRY where CARRY is zero or one.  Returns the carry flag.
+  static WordType tcAdd(WordType *, const WordType *,
+                        WordType carry, unsigned);
+  /// DST += RHS.  Returns the carry flag.
+  static WordType tcAddPart(WordType *, WordType, unsigned);
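+
+  // Illustrative use of the part-wise add helpers (a sketch of the semantics
+  // documented above, not normative):
+  //   APInt::WordType A[2] = {~0ULL, 0};
+  //   APInt::WordType B[2] = {1, 0};
+  //   APInt::tcAdd(A, B, 0, 2);  // A becomes {0, 1}; returns 0 (no carry out)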
+
+  /// DST -= RHS + CARRY where CARRY is zero or one. Returns the carry flag.
+  static WordType tcSubtract(WordType *, const WordType *,
+                             WordType carry, unsigned);
+  /// DST -= RHS.  Returns the carry flag.
+  static WordType tcSubtractPart(WordType *, WordType, unsigned);
+
+  /// DST += SRC * MULTIPLIER + PART   if add is true
+  /// DST  = SRC * MULTIPLIER + PART   if add is false
+  ///
+  /// Requires 0 <= DSTPARTS <= SRCPARTS + 1.  If DST overlaps SRC they must
+  /// start at the same point, i.e. DST == SRC.
+  ///
+  /// If DSTPARTS == SRCPARTS + 1 no overflow occurs and zero is returned.
+  /// Otherwise DST is filled with the least significant DSTPARTS parts of the
+  /// result, and if all of the omitted higher parts were zero return zero,
+  /// otherwise overflow occurred and return one.
+  static int tcMultiplyPart(WordType *dst, const WordType *src,
+                            WordType multiplier, WordType carry,
+                            unsigned srcParts, unsigned dstParts,
+                            bool add);
+
+  /// DST = LHS * RHS, where DST has the same width as the operands and is
+  /// filled with the least significant parts of the result.  Returns one if
+  /// overflow occurred, otherwise zero.  DST must be disjoint from both
+  /// operands.
+  static int tcMultiply(WordType *, const WordType *, const WordType *,
+                        unsigned);
+
+  /// DST = LHS * RHS, where DST has width the sum of the widths of the
+  /// operands. No overflow occurs. DST must be disjoint from both operands.
+  static void tcFullMultiply(WordType *, const WordType *,
+                             const WordType *, unsigned, unsigned);
+
+  /// If RHS is zero, LHS and REMAINDER are left unchanged, and one is
+  /// returned.  Otherwise set LHS to LHS / RHS with the fractional part
+  /// discarded, set REMAINDER to the remainder, and return zero; i.e.
+  ///
+  ///  OLD_LHS = RHS * LHS + REMAINDER
+  ///
+  /// SCRATCH is a bignum of the same size as the operands and result for use by
+  /// the routine; its contents need not be initialized and are destroyed.  LHS,
+  /// REMAINDER and SCRATCH must be distinct.
+  static int tcDivide(WordType *lhs, const WordType *rhs,
+                      WordType *remainder, WordType *scratch,
+                      unsigned parts);
+
+  /// Shift a bignum left Count bits.  Shifted-in bits are zero.  There are no
+  /// restrictions on Count.
+  static void tcShiftLeft(WordType *, unsigned Words, unsigned Count);
+
+  /// Shift a bignum right Count bits.  Shifted-in bits are zero.  There are no
+  /// restrictions on Count.
+  static void tcShiftRight(WordType *, unsigned Words, unsigned Count);
+
+  /// The obvious AND, OR, XOR, and complement operations.
+  static void tcAnd(WordType *, const WordType *, unsigned);
+  static void tcOr(WordType *, const WordType *, unsigned);
+  static void tcXor(WordType *, const WordType *, unsigned);
+  static void tcComplement(WordType *, unsigned);
+
+  /// Comparison (unsigned) of two bignums.
+  static int tcCompare(const WordType *, const WordType *, unsigned);
+
+  /// Increment a bignum in-place.  Return the carry flag.
+  static WordType tcIncrement(WordType *dst, unsigned parts) {
+    return tcAddPart(dst, 1, parts);
+  }
+
+  /// Decrement a bignum in-place.  Return the borrow flag.
+  static WordType tcDecrement(WordType *dst, unsigned parts) {
+    return tcSubtractPart(dst, 1, parts);
+  }
+
+  /// Set the least significant BITS bits of a bignum; clear the rest.
+  static void tcSetLeastSignificantBits(WordType *, unsigned, unsigned bits);
+
+  /// \brief Debug method.
+  void dump() const;
+
+  /// @}
+};
+
+/// Magic data for optimising signed division by a constant.
+struct APInt::ms {
+  APInt m;    ///< magic number
+  unsigned s; ///< shift amount
+};
+
+/// Magic data for optimising unsigned division by a constant.
+struct APInt::mu {
+  APInt m;    ///< magic number
+  bool a;     ///< add indicator
+  unsigned s; ///< shift amount
+};
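+
+// Illustrative sketch (editorial, not part of the original header): the magic
+// numbers let a code generator replace division by a constant with a multiply
+// and a shift (see Hacker's Delight, ch. 10).  For unsigned division by 7 in
+// 32 bits:
+//   APInt D(32, 7);
+//   APInt::mu M = D.magicu();
+//   // A backend then emits roughly q = umulhi(n, M.m) >> M.s, with an extra
+//   // add/shift fixup when M.a is set.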
+
+inline bool operator==(uint64_t V1, const APInt &V2) { return V2 == V1; }
+
+inline bool operator!=(uint64_t V1, const APInt &V2) { return V2 != V1; }
+
+/// \brief Unary bitwise complement operator.
+///
+/// \returns an APInt that is the bitwise complement of \p v.
+inline APInt operator~(APInt v) {
+  v.flipAllBits();
+  return v;
+}
+
+inline APInt operator&(APInt a, const APInt &b) {
+  a &= b;
+  return a;
+}
+
+inline APInt operator&(const APInt &a, APInt &&b) {
+  b &= a;
+  return std::move(b);
+}
+
+inline APInt operator&(APInt a, uint64_t RHS) {
+  a &= RHS;
+  return a;
+}
+
+inline APInt operator&(uint64_t LHS, APInt b) {
+  b &= LHS;
+  return b;
+}
+
+inline APInt operator|(APInt a, const APInt &b) {
+  a |= b;
+  return a;
+}
+
+inline APInt operator|(const APInt &a, APInt &&b) {
+  b |= a;
+  return std::move(b);
+}
+
+inline APInt operator|(APInt a, uint64_t RHS) {
+  a |= RHS;
+  return a;
+}
+
+inline APInt operator|(uint64_t LHS, APInt b) {
+  b |= LHS;
+  return b;
+}
+
+inline APInt operator^(APInt a, const APInt &b) {
+  a ^= b;
+  return a;
+}
+
+inline APInt operator^(const APInt &a, APInt &&b) {
+  b ^= a;
+  return std::move(b);
+}
+
+inline APInt operator^(APInt a, uint64_t RHS) {
+  a ^= RHS;
+  return a;
+}
+
+inline APInt operator^(uint64_t LHS, APInt b) {
+  b ^= LHS;
+  return b;
+}
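+
+// Editorial note: the (const APInt &, APInt &&) overloads above reuse the
+// rvalue operand's storage, avoiding an allocation when one operand is a
+// temporary; the pass-by-value overloads copy (or move) into their parameter
+// instead.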
+
+inline raw_ostream &operator<<(raw_ostream &OS, const APInt &I) {
+  I.print(OS, true);
+  return OS;
+}
+
+inline APInt operator-(APInt v) {
+  v.negate();
+  return v;
+}
+
+inline APInt operator+(APInt a, const APInt &b) {
+  a += b;
+  return a;
+}
+
+inline APInt operator+(const APInt &a, APInt &&b) {
+  b += a;
+  return std::move(b);
+}
+
+inline APInt operator+(APInt a, uint64_t RHS) {
+  a += RHS;
+  return a;
+}
+
+inline APInt operator+(uint64_t LHS, APInt b) {
+  b += LHS;
+  return b;
+}
+
+inline APInt operator-(APInt a, const APInt &b) {
+  a -= b;
+  return a;
+}
+
+inline APInt operator-(const APInt &a, APInt &&b) {
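+  // Compute a - b as (-b) + a, reusing b's storage.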
+  b.negate();
+  b += a;
+  return std::move(b);
+}
+
+inline APInt operator-(APInt a, uint64_t RHS) {
+  a -= RHS;
+  return a;
+}
+
+inline APInt operator-(uint64_t LHS, APInt b) {
+  b.negate();
+  b += LHS;
+  return b;
+}
+
+inline APInt operator*(APInt a, uint64_t RHS) {
+  a *= RHS;
+  return a;
+}
+
+inline APInt operator*(uint64_t LHS, APInt b) {
+  b *= LHS;
+  return b;
+}
+
+namespace APIntOps {
+
+/// \brief Determine the smaller of two APInts considered to be signed.
+inline const APInt &smin(const APInt &A, const APInt &B) {
+  return A.slt(B) ? A : B;
+}
+
+/// \brief Determine the larger of two APInts considered to be signed.
+inline const APInt &smax(const APInt &A, const APInt &B) {
+  return A.sgt(B) ? A : B;
+}
+
+/// \brief Determine the smaller of two APInts considered to be unsigned.
+inline const APInt &umin(const APInt &A, const APInt &B) {
+  return A.ult(B) ? A : B;
+}
+
+/// \brief Determine the larger of two APInts considered to be unsigned.
+inline const APInt &umax(const APInt &A, const APInt &B) {
+  return A.ugt(B) ? A : B;
+}
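+
+// Illustrative sketch (editorial, not part of the original header): signed
+// and unsigned orderings can disagree on the same bit patterns.
+//   APInt A(8, 0x80), B(8, 1); // 0x80 is -128 signed but 128 unsigned
+//   APIntOps::smin(A, B);      // returns A: -128 < 1
+//   APIntOps::umin(A, B);      // returns B: 1 < 128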
+
+/// \brief Compute GCD of two unsigned APInt values.
+///
+/// This function returns the greatest common divisor of the two APInt values
+/// using Stein's algorithm.
+///
+/// \returns the greatest common divisor of A and B.
+APInt GreatestCommonDivisor(APInt A, APInt B);
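+
+// For example (editorial): GreatestCommonDivisor(APInt(32, 12),
+// APInt(32, 18)) yields an APInt holding 6.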
+
+/// \brief Converts the given APInt to a double value.
+///
+/// Treats the APInt as an unsigned value for conversion purposes.
+inline double RoundAPIntToDouble(const APInt &APIVal) {
+  return APIVal.roundToDouble();
+}
+
+/// \brief Converts the given APInt to a double value.
+///
+/// Treats the APInt as a signed value for conversion purposes.
+inline double RoundSignedAPIntToDouble(const APInt &APIVal) {
+  return APIVal.signedRoundToDouble();
+}
+
+/// \brief Converts the given APInt to a float value.
+///
+/// Treats the APInt as an unsigned value for conversion purposes.
+inline float RoundAPIntToFloat(const APInt &APIVal) {
+  return float(RoundAPIntToDouble(APIVal));
+}
+
+/// \brief Converts the given APInt to a float value.
+///
+/// Treats the APInt as a signed value for conversion purposes.
+inline float RoundSignedAPIntToFloat(const APInt &APIVal) {
+  return float(APIVal.signedRoundToDouble());
+}
+
+/// \brief Converts the given double value into an APInt.
+///
+/// This function converts a double value to an APInt value.
+APInt RoundDoubleToAPInt(double Double, unsigned width);
+
+/// \brief Converts a float value into an APInt.
+///
+/// Converts a float value into an APInt value of the given bit width.
+inline APInt RoundFloatToAPInt(float Float, unsigned width) {
+  return RoundDoubleToAPInt(double(Float), width);
+}
+
+} // End of APIntOps namespace
+
+// See friend declaration above. This additional declaration is required in
+// order to compile LLVM with the IBM xlC compiler.
+hash_code hash_value(const APInt &Arg);
+} // End of llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/APSInt.h b/linux-x64/clang/include/llvm/ADT/APSInt.h
new file mode 100644
index 0000000..dabbf33
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/APSInt.h
@@ -0,0 +1,336 @@
+//===-- llvm/ADT/APSInt.h - Arbitrary Precision Signed Int -----*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the APSInt class, which is a simple class that
+// represents an arbitrary sized integer that knows its signedness.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APSINT_H
+#define LLVM_ADT_APSINT_H
+
+#include "llvm/ADT/APInt.h"
+
+namespace llvm {
+
+class LLVM_NODISCARD APSInt : public APInt {
+  bool IsUnsigned;
+
+public:
+  /// Default constructor that creates an uninitialized APSInt.
+  explicit APSInt() : IsUnsigned(false) {}
+
+  /// APSInt ctor - Create an APSInt with the specified width, defaulting to
+  /// unsigned.
+  explicit APSInt(uint32_t BitWidth, bool isUnsigned = true)
+   : APInt(BitWidth, 0), IsUnsigned(isUnsigned) {}
+
+  explicit APSInt(APInt I, bool isUnsigned = true)
+   : APInt(std::move(I)), IsUnsigned(isUnsigned) {}
+
+  /// Construct an APSInt from a string representation.
+  ///
+  /// This constructor interprets the string \p Str using the radix of 10.
+  /// The interpretation stops at the end of the string. The bit width of the
+  /// constructed APSInt is determined automatically.
+  ///
+  /// \param Str the string to be interpreted.
+  explicit APSInt(StringRef Str);
+
+  APSInt &operator=(APInt RHS) {
+    // Retain our current sign.
+    APInt::operator=(std::move(RHS));
+    return *this;
+  }
+
+  APSInt &operator=(uint64_t RHS) {
+    // Retain our current sign.
+    APInt::operator=(RHS);
+    return *this;
+  }
+
+  // Query sign information.
+  bool isSigned() const { return !IsUnsigned; }
+  bool isUnsigned() const { return IsUnsigned; }
+  void setIsUnsigned(bool Val) { IsUnsigned = Val; }
+  void setIsSigned(bool Val) { IsUnsigned = !Val; }
+
+  /// toString - Append this APSInt to the specified SmallString.
+  void toString(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
+    APInt::toString(Str, Radix, isSigned());
+  }
+  /// toString - Converts an APSInt to a std::string.  This is an inefficient
+  /// method; you should prefer passing in a SmallString instead.
+  std::string toString(unsigned Radix) const {
+    return APInt::toString(Radix, isSigned());
+  }
+  using APInt::toString;
+
+  /// \brief Get the correctly-extended \c int64_t value.
+  int64_t getExtValue() const {
+    assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
+    return isSigned() ? getSExtValue() : getZExtValue();
+  }
+
+  APSInt trunc(uint32_t width) const {
+    return APSInt(APInt::trunc(width), IsUnsigned);
+  }
+
+  APSInt extend(uint32_t width) const {
+    if (IsUnsigned)
+      return APSInt(zext(width), IsUnsigned);
+    else
+      return APSInt(sext(width), IsUnsigned);
+  }
+
+  APSInt extOrTrunc(uint32_t width) const {
+    if (IsUnsigned)
+      return APSInt(zextOrTrunc(width), IsUnsigned);
+    else
+      return APSInt(sextOrTrunc(width), IsUnsigned);
+  }
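+
+  // Illustrative sketch (editorial, not part of the original header):
+  // extension follows the signedness flag, so the same 4-bit pattern 0b1110
+  // widens differently.
+  //   APSInt S(APInt(4, 14), /*isUnsigned=*/false);
+  //   S.extend(8); // 0xFE, i.e. -2 sign-extended
+  //   APSInt U(APInt(4, 14), /*isUnsigned=*/true);
+  //   U.extend(8); // 0x0E, i.e. 14 zero-extended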
+
+  const APSInt &operator%=(const APSInt &RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    if (IsUnsigned)
+      *this = urem(RHS);
+    else
+      *this = srem(RHS);
+    return *this;
+  }
+  const APSInt &operator/=(const APSInt &RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    if (IsUnsigned)
+      *this = udiv(RHS);
+    else
+      *this = sdiv(RHS);
+    return *this;
+  }
+  APSInt operator%(const APSInt &RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? APSInt(urem(RHS), true) : APSInt(srem(RHS), false);
+  }
+  APSInt operator/(const APSInt &RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? APSInt(udiv(RHS), true) : APSInt(sdiv(RHS), false);
+  }
+
+  APSInt operator>>(unsigned Amt) const {
+    return IsUnsigned ? APSInt(lshr(Amt), true) : APSInt(ashr(Amt), false);
+  }
+  APSInt& operator>>=(unsigned Amt) {
+    if (IsUnsigned)
+      lshrInPlace(Amt);
+    else
+      ashrInPlace(Amt);
+    return *this;
+  }
+
+  inline bool operator<(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? ult(RHS) : slt(RHS);
+  }
+  inline bool operator>(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? ugt(RHS) : sgt(RHS);
+  }
+  inline bool operator<=(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? ule(RHS) : sle(RHS);
+  }
+  inline bool operator>=(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? uge(RHS) : sge(RHS);
+  }
+  inline bool operator==(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return eq(RHS);
+  }
+  inline bool operator!=(const APSInt& RHS) const {
+    return !((*this) == RHS);
+  }
+
+  bool operator==(int64_t RHS) const {
+    return compareValues(*this, get(RHS)) == 0;
+  }
+  bool operator!=(int64_t RHS) const {
+    return compareValues(*this, get(RHS)) != 0;
+  }
+  bool operator<=(int64_t RHS) const {
+    return compareValues(*this, get(RHS)) <= 0;
+  }
+  bool operator>=(int64_t RHS) const {
+    return compareValues(*this, get(RHS)) >= 0;
+  }
+  bool operator<(int64_t RHS) const {
+    return compareValues(*this, get(RHS)) < 0;
+  }
+  bool operator>(int64_t RHS) const {
+    return compareValues(*this, get(RHS)) > 0;
+  }
+
+  // The remaining operators just wrap the logic of APInt, but retain the
+  // signedness information.
+
+  APSInt operator<<(unsigned Bits) const {
+    return APSInt(static_cast<const APInt&>(*this) << Bits, IsUnsigned);
+  }
+  APSInt& operator<<=(unsigned Amt) {
+    static_cast<APInt&>(*this) <<= Amt;
+    return *this;
+  }
+
+  APSInt& operator++() {
+    ++(static_cast<APInt&>(*this));
+    return *this;
+  }
+  APSInt& operator--() {
+    --(static_cast<APInt&>(*this));
+    return *this;
+  }
+  APSInt operator++(int) {
+    return APSInt(++static_cast<APInt&>(*this), IsUnsigned);
+  }
+  APSInt operator--(int) {
+    return APSInt(--static_cast<APInt&>(*this), IsUnsigned);
+  }
+  APSInt operator-() const {
+    return APSInt(-static_cast<const APInt&>(*this), IsUnsigned);
+  }
+  APSInt& operator+=(const APSInt& RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    static_cast<APInt&>(*this) += RHS;
+    return *this;
+  }
+  APSInt& operator-=(const APSInt& RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    static_cast<APInt&>(*this) -= RHS;
+    return *this;
+  }
+  APSInt& operator*=(const APSInt& RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    static_cast<APInt&>(*this) *= RHS;
+    return *this;
+  }
+  APSInt& operator&=(const APSInt& RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    static_cast<APInt&>(*this) &= RHS;
+    return *this;
+  }
+  APSInt& operator|=(const APSInt& RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    static_cast<APInt&>(*this) |= RHS;
+    return *this;
+  }
+  APSInt& operator^=(const APSInt& RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    static_cast<APInt&>(*this) ^= RHS;
+    return *this;
+  }
+
+  APSInt operator&(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return APSInt(static_cast<const APInt&>(*this) & RHS, IsUnsigned);
+  }
+
+  APSInt operator|(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return APSInt(static_cast<const APInt&>(*this) | RHS, IsUnsigned);
+  }
+
+  APSInt operator^(const APSInt &RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return APSInt(static_cast<const APInt&>(*this) ^ RHS, IsUnsigned);
+  }
+
+  APSInt operator*(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return APSInt(static_cast<const APInt&>(*this) * RHS, IsUnsigned);
+  }
+  APSInt operator+(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return APSInt(static_cast<const APInt&>(*this) + RHS, IsUnsigned);
+  }
+  APSInt operator-(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return APSInt(static_cast<const APInt&>(*this) - RHS, IsUnsigned);
+  }
+  APSInt operator~() const {
+    return APSInt(~static_cast<const APInt&>(*this), IsUnsigned);
+  }
+
+  /// getMaxValue - Return the APSInt representing the maximum integer value
+  ///  with the given bit width and signedness.
+  static APSInt getMaxValue(uint32_t numBits, bool Unsigned) {
+    return APSInt(Unsigned ? APInt::getMaxValue(numBits)
+                           : APInt::getSignedMaxValue(numBits), Unsigned);
+  }
+
+  /// getMinValue - Return the APSInt representing the minimum integer value
+  ///  with the given bit width and signedness.
+  static APSInt getMinValue(uint32_t numBits, bool Unsigned) {
+    return APSInt(Unsigned ? APInt::getMinValue(numBits)
+                           : APInt::getSignedMinValue(numBits), Unsigned);
+  }
+
+  /// \brief Determine if two APSInts have the same value, zero- or
+  /// sign-extending as needed.
+  static bool isSameValue(const APSInt &I1, const APSInt &I2) {
+    return !compareValues(I1, I2);
+  }
+
+  /// \brief Compare underlying values of two numbers.
+  static int compareValues(const APSInt &I1, const APSInt &I2) {
+    if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
+      return I1.IsUnsigned ? I1.compare(I2) : I1.compareSigned(I2);
+
+    // Check for a bit-width mismatch.
+    if (I1.getBitWidth() > I2.getBitWidth())
+      return compareValues(I1, I2.extend(I1.getBitWidth()));
+    if (I2.getBitWidth() > I1.getBitWidth())
+      return compareValues(I1.extend(I2.getBitWidth()), I2);
+
+    // We have a signedness mismatch. Check for negative values and do an
+    // unsigned compare if both are positive.
+    if (I1.isSigned()) {
+      assert(!I2.isSigned() && "Expected signed mismatch");
+      if (I1.isNegative())
+        return -1;
+    } else {
+      assert(I2.isSigned() && "Expected signed mismatch");
+      if (I2.isNegative())
+        return 1;
+    }
+
+    return I1.compare(I2);
+  }
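+
+  // Illustrative sketch (editorial, not part of the original header): unlike
+  // C's usual arithmetic conversions, a negative signed value compares less
+  // than any unsigned value.
+  //   APSInt S(APInt(32, -1, /*isSigned=*/true), /*isUnsigned=*/false);
+  //   APSInt U(APInt(32, 1), /*isUnsigned=*/true);
+  //   APSInt::compareValues(S, U); // < 0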
+
+  static APSInt get(int64_t X) { return APSInt(APInt(64, X), false); }
+  static APSInt getUnsigned(uint64_t X) { return APSInt(APInt(64, X), true); }
+
+  /// Profile - Used to insert APSInt objects, or objects that contain APSInt
+  ///  objects, into FoldingSets.
+  void Profile(FoldingSetNodeID& ID) const;
+};
+
+inline bool operator==(int64_t V1, const APSInt &V2) { return V2 == V1; }
+inline bool operator!=(int64_t V1, const APSInt &V2) { return V2 != V1; }
+inline bool operator<=(int64_t V1, const APSInt &V2) { return V2 >= V1; }
+inline bool operator>=(int64_t V1, const APSInt &V2) { return V2 <= V1; }
+inline bool operator<(int64_t V1, const APSInt &V2) { return V2 > V1; }
+inline bool operator>(int64_t V1, const APSInt &V2) { return V2 < V1; }
+
+inline raw_ostream &operator<<(raw_ostream &OS, const APSInt &I) {
+  I.print(OS, I.isSigned());
+  return OS;
+}
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/AllocatorList.h b/linux-x64/clang/include/llvm/ADT/AllocatorList.h
new file mode 100644
index 0000000..178c674
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/AllocatorList.h
@@ -0,0 +1,241 @@
+//===- llvm/ADT/AllocatorList.h - Custom allocator list ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ALLOCATORLIST_H
+#define LLVM_ADT_ALLOCATORLIST_H
+
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/simple_ilist.h"
+#include "llvm/Support/Allocator.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+/// A linked-list with a custom, local allocator.
+///
+/// Expose a std::list-like interface that owns and uses a custom LLVM-style
+/// allocator (e.g., BumpPtrAllocator), leveraging \a simple_ilist for the
+/// implementation details.
+///
+/// Because this list owns the allocator, calling \a splice() with a different
+/// list isn't generally safe.  As such, \a splice has been left out of the
+/// interface entirely.
+template <class T, class AllocatorT> class AllocatorList : AllocatorT {
+  struct Node : ilist_node<Node> {
+    Node(Node &&) = delete;
+    Node(const Node &) = delete;
+    Node &operator=(Node &&) = delete;
+    Node &operator=(const Node &) = delete;
+
+    Node(T &&V) : V(std::move(V)) {}
+    Node(const T &V) : V(V) {}
+    template <class... Ts> Node(Ts &&... Vs) : V(std::forward<Ts>(Vs)...) {}
+    T V;
+  };
+
+  using list_type = simple_ilist<Node>;
+
+  list_type List;
+
+  AllocatorT &getAlloc() { return *this; }
+  const AllocatorT &getAlloc() const { return *this; }
+
+  template <class... ArgTs> Node *create(ArgTs &&... Args) {
+    return new (getAlloc()) Node(std::forward<ArgTs>(Args)...);
+  }
+
+  struct Cloner {
+    AllocatorList &AL;
+
+    Cloner(AllocatorList &AL) : AL(AL) {}
+
+    Node *operator()(const Node &N) const { return AL.create(N.V); }
+  };
+
+  struct Disposer {
+    AllocatorList &AL;
+
+    Disposer(AllocatorList &AL) : AL(AL) {}
+
+    void operator()(Node *N) const {
+      N->~Node();
+      AL.getAlloc().Deallocate(N);
+    }
+  };
+
+public:
+  using value_type = T;
+  using pointer = T *;
+  using reference = T &;
+  using const_pointer = const T *;
+  using const_reference = const T &;
+  using size_type = typename list_type::size_type;
+  using difference_type = typename list_type::difference_type;
+
+private:
+  template <class ValueT, class IteratorBase>
+  class IteratorImpl
+      : public iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>,
+                                     IteratorBase,
+                                     std::bidirectional_iterator_tag, ValueT> {
+    template <class OtherValueT, class OtherIteratorBase>
+    friend class IteratorImpl;
+    friend AllocatorList;
+
+    using base_type =
+        iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>, IteratorBase,
+                              std::bidirectional_iterator_tag, ValueT>;
+
+  public:
+    using value_type = ValueT;
+    using pointer = ValueT *;
+    using reference = ValueT &;
+
+    IteratorImpl() = default;
+    IteratorImpl(const IteratorImpl &) = default;
+    IteratorImpl &operator=(const IteratorImpl &) = default;
+
+    explicit IteratorImpl(const IteratorBase &I) : base_type(I) {}
+
+    template <class OtherValueT, class OtherIteratorBase>
+    IteratorImpl(const IteratorImpl<OtherValueT, OtherIteratorBase> &X,
+                 typename std::enable_if<std::is_convertible<
+                     OtherIteratorBase, IteratorBase>::value>::type * = nullptr)
+        : base_type(X.wrapped()) {}
+
+    ~IteratorImpl() = default;
+
+    reference operator*() const { return base_type::wrapped()->V; }
+    pointer operator->() const { return &operator*(); }
+
+    friend bool operator==(const IteratorImpl &L, const IteratorImpl &R) {
+      return L.wrapped() == R.wrapped();
+    }
+    friend bool operator!=(const IteratorImpl &L, const IteratorImpl &R) {
+      return !(L == R);
+    }
+  };
+
+public:
+  using iterator = IteratorImpl<T, typename list_type::iterator>;
+  using reverse_iterator =
+      IteratorImpl<T, typename list_type::reverse_iterator>;
+  using const_iterator =
+      IteratorImpl<const T, typename list_type::const_iterator>;
+  using const_reverse_iterator =
+      IteratorImpl<const T, typename list_type::const_reverse_iterator>;
+
+  AllocatorList() = default;
+  AllocatorList(AllocatorList &&X)
+      : AllocatorT(std::move(X.getAlloc())), List(std::move(X.List)) {}
+
+  AllocatorList(const AllocatorList &X) {
+    List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
+  }
+
+  AllocatorList &operator=(AllocatorList &&X) {
+    clear(); // Dispose of current nodes explicitly.
+    List = std::move(X.List);
+    getAlloc() = std::move(X.getAlloc());
+    return *this;
+  }
+
+  AllocatorList &operator=(const AllocatorList &X) {
+    List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
+    return *this;
+  }
+
+  ~AllocatorList() { clear(); }
+
+  void swap(AllocatorList &RHS) {
+    List.swap(RHS.List);
+    std::swap(getAlloc(), RHS.getAlloc());
+  }
+
+  bool empty() { return List.empty(); }
+  size_t size() { return List.size(); }
+
+  iterator begin() { return iterator(List.begin()); }
+  iterator end() { return iterator(List.end()); }
+  const_iterator begin() const { return const_iterator(List.begin()); }
+  const_iterator end() const { return const_iterator(List.end()); }
+  reverse_iterator rbegin() { return reverse_iterator(List.rbegin()); }
+  reverse_iterator rend() { return reverse_iterator(List.rend()); }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(List.rbegin());
+  }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(List.rend());
+  }
+
+  T &back() { return List.back().V; }
+  T &front() { return List.front().V; }
+  const T &back() const { return List.back().V; }
+  const T &front() const { return List.front().V; }
+
+  template <class... Ts> iterator emplace(iterator I, Ts &&... Vs) {
+    return iterator(List.insert(I.wrapped(), *create(std::forward<Ts>(Vs)...)));
+  }
+
+  iterator insert(iterator I, T &&V) {
+    return iterator(List.insert(I.wrapped(), *create(std::move(V))));
+  }
+  iterator insert(iterator I, const T &V) {
+    return iterator(List.insert(I.wrapped(), *create(V)));
+  }
+
+  template <class Iterator>
+  void insert(iterator I, Iterator First, Iterator Last) {
+    for (; First != Last; ++First)
+      List.insert(I.wrapped(), *create(*First));
+  }
+
+  iterator erase(iterator I) {
+    return iterator(List.eraseAndDispose(I.wrapped(), Disposer(*this)));
+  }
+
+  iterator erase(iterator First, iterator Last) {
+    return iterator(
+        List.eraseAndDispose(First.wrapped(), Last.wrapped(), Disposer(*this)));
+  }
+
+  void clear() { List.clearAndDispose(Disposer(*this)); }
+  void pop_back() { List.eraseAndDispose(--List.end(), Disposer(*this)); }
+  void pop_front() { List.eraseAndDispose(List.begin(), Disposer(*this)); }
+  void push_back(T &&V) { insert(end(), std::move(V)); }
+  void push_front(T &&V) { insert(begin(), std::move(V)); }
+  void push_back(const T &V) { insert(end(), V); }
+  void push_front(const T &V) { insert(begin(), V); }
+  template <class... Ts> void emplace_back(Ts &&... Vs) {
+    emplace(end(), std::forward<Ts>(Vs)...);
+  }
+  template <class... Ts> void emplace_front(Ts &&... Vs) {
+    emplace(begin(), std::forward<Ts>(Vs)...);
+  }
+
+  /// Reset the underlying allocator.
+  ///
+  /// \pre \c empty()
+  void resetAlloc() {
+    assert(empty() && "Cannot reset allocator if not empty");
+    getAlloc().Reset();
+  }
+};
+
+template <class T> using BumpPtrList = AllocatorList<T, BumpPtrAllocator>;
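+
+// Illustrative sketch (editorial, not part of the original header):
+// BumpPtrList owns its BumpPtrAllocator, so node memory is reclaimed in bulk
+// when the list is destroyed (or via clear() followed by resetAlloc()).
+//   BumpPtrList<int> L;
+//   L.push_back(1);
+//   L.emplace_back(2);
+//   for (int X : L) { /* visits 1, then 2 */ }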
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ALLOCATORLIST_H
diff --git a/linux-x64/clang/include/llvm/ADT/ArrayRef.h b/linux-x64/clang/include/llvm/ADT/ArrayRef.h
new file mode 100644
index 0000000..5f7a769
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ArrayRef.h
@@ -0,0 +1,541 @@
+//===- ArrayRef.h - Array Reference Wrapper ---------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ARRAYREF_H
+#define LLVM_ADT_ARRAYREF_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+#include <vector>
+
+namespace llvm {
+
+  /// ArrayRef - Represent a constant reference to an array (0 or more elements
+  /// consecutively in memory), i.e. a start pointer and a length.  It allows
+  /// various APIs to take consecutive elements easily and conveniently.
+  ///
+  /// This class does not own the underlying data, it is expected to be used in
+  /// situations where the data resides in some other buffer, whose lifetime
+  /// extends past that of the ArrayRef. For this reason, it is not in general
+  /// safe to store an ArrayRef.
+  ///
+  /// This is intended to be trivially copyable, so it should be passed by
+  /// value.
+  template<typename T>
+  class LLVM_NODISCARD ArrayRef {
+  public:
+    using iterator = const T *;
+    using const_iterator = const T *;
+    using size_type = size_t;
+    using reverse_iterator = std::reverse_iterator<iterator>;
+
+  private:
+    /// The start of the array, in an external buffer.
+    const T *Data = nullptr;
+
+    /// The number of elements.
+    size_type Length = 0;
+
+  public:
+    /// @name Constructors
+    /// @{
+
+    /// Construct an empty ArrayRef.
+    /*implicit*/ ArrayRef() = default;
+
+    /// Construct an empty ArrayRef from None.
+    /*implicit*/ ArrayRef(NoneType) {}
+
+    /// Construct an ArrayRef from a single element.
+    /*implicit*/ ArrayRef(const T &OneElt)
+      : Data(&OneElt), Length(1) {}
+
+    /// Construct an ArrayRef from a pointer and length.
+    /*implicit*/ ArrayRef(const T *data, size_t length)
+      : Data(data), Length(length) {}
+
+    /// Construct an ArrayRef from a range.
+    ArrayRef(const T *begin, const T *end)
+      : Data(begin), Length(end - begin) {}
+
+    /// Construct an ArrayRef from a SmallVector. This is templated in order to
+    /// avoid instantiating SmallVectorTemplateCommon<T> whenever we
+    /// copy-construct an ArrayRef.
+    template<typename U>
+    /*implicit*/ ArrayRef(const SmallVectorTemplateCommon<T, U> &Vec)
+      : Data(Vec.data()), Length(Vec.size()) {
+    }
+
+    /// Construct an ArrayRef from a std::vector.
+    template<typename A>
+    /*implicit*/ ArrayRef(const std::vector<T, A> &Vec)
+      : Data(Vec.data()), Length(Vec.size()) {}
+
+    /// Construct an ArrayRef from a std::array
+    template <size_t N>
+    /*implicit*/ constexpr ArrayRef(const std::array<T, N> &Arr)
+        : Data(Arr.data()), Length(N) {}
+
+    /// Construct an ArrayRef from a C array.
+    template <size_t N>
+    /*implicit*/ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {}
+
+    /// Construct an ArrayRef from a std::initializer_list.
+    /*implicit*/ ArrayRef(const std::initializer_list<T> &Vec)
+    : Data(Vec.begin() == Vec.end() ? (T*)nullptr : Vec.begin()),
+      Length(Vec.size()) {}
+
+    /// Construct an ArrayRef<const T*> from ArrayRef<T*>. This uses SFINAE to
+    /// ensure that only ArrayRefs of pointers can be converted.
+    template <typename U>
+    ArrayRef(
+        const ArrayRef<U *> &A,
+        typename std::enable_if<
+           std::is_convertible<U *const *, T const *>::value>::type * = nullptr)
+      : Data(A.data()), Length(A.size()) {}
+
+    /// Construct an ArrayRef<const T*> from a SmallVector<T*>. This is
+    /// templated in order to avoid instantiating SmallVectorTemplateCommon<T>
+    /// whenever we copy-construct an ArrayRef.
+    template<typename U, typename DummyT>
+    /*implicit*/ ArrayRef(
+      const SmallVectorTemplateCommon<U *, DummyT> &Vec,
+      typename std::enable_if<
+          std::is_convertible<U *const *, T const *>::value>::type * = nullptr)
+      : Data(Vec.data()), Length(Vec.size()) {
+    }
+
+    /// Construct an ArrayRef<const T*> from std::vector<T*>. This uses SFINAE
+    /// to ensure that only vectors of pointers can be converted.
+    template<typename U, typename A>
+    ArrayRef(const std::vector<U *, A> &Vec,
+             typename std::enable_if<
+                 std::is_convertible<U *const *, T const *>::value>::type* = 0)
+      : Data(Vec.data()), Length(Vec.size()) {}
+
+    /// @}
+    /// @name Simple Operations
+    /// @{
+
+    iterator begin() const { return Data; }
+    iterator end() const { return Data + Length; }
+
+    reverse_iterator rbegin() const { return reverse_iterator(end()); }
+    reverse_iterator rend() const { return reverse_iterator(begin()); }
+
+    /// empty - Check if the array is empty.
+    bool empty() const { return Length == 0; }
+
+    const T *data() const { return Data; }
+
+    /// size - Get the array size.
+    size_t size() const { return Length; }
+
+    /// front - Get the first element.
+    const T &front() const {
+      assert(!empty());
+      return Data[0];
+    }
+
+    /// back - Get the last element.
+    const T &back() const {
+      assert(!empty());
+      return Data[Length-1];
+    }
+
+    /// copy - Allocate copy in Allocator and return ArrayRef<T> to it.
+    template <typename Allocator> ArrayRef<T> copy(Allocator &A) {
+      T *Buff = A.template Allocate<T>(Length);
+      std::uninitialized_copy(begin(), end(), Buff);
+      return ArrayRef<T>(Buff, Length);
+    }
+
+    /// equals - Check for element-wise equality.
+    bool equals(ArrayRef RHS) const {
+      if (Length != RHS.Length)
+        return false;
+      return std::equal(begin(), end(), RHS.begin());
+    }
+
+    /// slice(n, m) - Chop off the first N elements of the array, and keep M
+    /// elements in the array.
+    ArrayRef<T> slice(size_t N, size_t M) const {
+      assert(N+M <= size() && "Invalid specifier");
+      return ArrayRef<T>(data()+N, M);
+    }
+
+    /// slice(n) - Chop off the first N elements of the array.
+    ArrayRef<T> slice(size_t N) const { return slice(N, size() - N); }
+
+    /// \brief Drop the first \p N elements of the array.
+    ArrayRef<T> drop_front(size_t N = 1) const {
+      assert(size() >= N && "Dropping more elements than exist");
+      return slice(N, size() - N);
+    }
+
+    /// \brief Drop the last \p N elements of the array.
+    ArrayRef<T> drop_back(size_t N = 1) const {
+      assert(size() >= N && "Dropping more elements than exist");
+      return slice(0, size() - N);
+    }
+
+    /// \brief Return a copy of *this with the first N elements satisfying the
+    /// given predicate removed.
+    template <class PredicateT> ArrayRef<T> drop_while(PredicateT Pred) const {
+      return ArrayRef<T>(find_if_not(*this, Pred), end());
+    }
+
+    /// \brief Return a copy of *this with the first N elements not satisfying
+    /// the given predicate removed.
+    template <class PredicateT> ArrayRef<T> drop_until(PredicateT Pred) const {
+      return ArrayRef<T>(find_if(*this, Pred), end());
+    }
+
+    /// \brief Return a copy of *this with only the first \p N elements.
+    ArrayRef<T> take_front(size_t N = 1) const {
+      if (N >= size())
+        return *this;
+      return drop_back(size() - N);
+    }
+
+    /// \brief Return a copy of *this with only the last \p N elements.
+    ArrayRef<T> take_back(size_t N = 1) const {
+      if (N >= size())
+        return *this;
+      return drop_front(size() - N);
+    }
+
+    /// \brief Return the first N elements of this Array that satisfy the given
+    /// predicate.
+    template <class PredicateT> ArrayRef<T> take_while(PredicateT Pred) const {
+      return ArrayRef<T>(begin(), find_if_not(*this, Pred));
+    }
+
+    /// \brief Return the first N elements of this Array that don't satisfy the
+    /// given predicate.
+    template <class PredicateT> ArrayRef<T> take_until(PredicateT Pred) const {
+      return ArrayRef<T>(begin(), find_if(*this, Pred));
+    }
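+
+    // Illustrative sketch (editorial, not part of the original header): for
+    // an ArrayRef over {0,1,2,3,4}, slice(1, 3) views {1,2,3},
+    // drop_front(2) views {2,3,4}, take_back(2) views {3,4}, and
+    // take_while([](int X) { return X < 2; }) views {0,1}.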
+
+    /// @}
+    /// @name Operator Overloads
+    /// @{
+    const T &operator[](size_t Index) const {
+      assert(Index < Length && "Invalid index!");
+      return Data[Index];
+    }
+
+    /// Disallow accidental assignment from a temporary.
+    ///
+    /// The declaration here is extra complicated so that "arrayRef = {}"
+    /// continues to select the move assignment operator.
+    template <typename U>
+    typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type &
+    operator=(U &&Temporary) = delete;
+
+    /// Disallow accidental assignment from a temporary.
+    ///
+    /// The declaration here is extra complicated so that "arrayRef = {}"
+    /// continues to select the move assignment operator.
+    template <typename U>
+    typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type &
+    operator=(std::initializer_list<U>) = delete;
+
+    /// @}
+    /// @name Expensive Operations
+    /// @{
+    std::vector<T> vec() const {
+      return std::vector<T>(Data, Data+Length);
+    }
+
+    /// @}
+    /// @name Conversion operators
+    /// @{
+    operator std::vector<T>() const {
+      return std::vector<T>(Data, Data+Length);
+    }
+
+    /// @}
+  };
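+
+  // Illustrative sketch (editorial, not part of the original header):
+  // ArrayRef binds to any contiguous sequence without copying, so one
+  // signature serves many callers.
+  //   auto Sum = [](ArrayRef<int> A) {
+  //     int S = 0;
+  //     for (int X : A) S += X;
+  //     return S;
+  //   };
+  //   int Raw[3] = {1, 2, 3};
+  //   std::vector<int> Vec = {4, 5};
+  //   Sum(Raw);       // C array
+  //   Sum(Vec);       // std::vector
+  //   Sum({7, 8, 9}); // initializer_list; valid only for the call's duration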
+
+  /// MutableArrayRef - Represent a mutable reference to an array (0 or more
+  /// elements consecutively in memory), i.e. a start pointer and a length.  It
+  /// allows various APIs to take and modify consecutive elements easily and
+  /// conveniently.
+  ///
+  /// This class does not own the underlying data, it is expected to be used in
+  /// situations where the data resides in some other buffer, whose lifetime
+  /// extends past that of the MutableArrayRef. For this reason, it is not in
+  /// general safe to store a MutableArrayRef.
+  ///
+  /// This is intended to be trivially copyable, so it should be passed by
+  /// value.
+  template<typename T>
+  class LLVM_NODISCARD MutableArrayRef : public ArrayRef<T> {
+  public:
+    using iterator = T *;
+    using reverse_iterator = std::reverse_iterator<iterator>;
+
+    /// Construct an empty MutableArrayRef.
+    /*implicit*/ MutableArrayRef() = default;
+
+    /// Construct an empty MutableArrayRef from None.
+    /*implicit*/ MutableArrayRef(NoneType) : ArrayRef<T>() {}
+
+    /// Construct a MutableArrayRef from a single element.
+    /*implicit*/ MutableArrayRef(T &OneElt) : ArrayRef<T>(OneElt) {}
+
+    /// Construct a MutableArrayRef from a pointer and length.
+    /*implicit*/ MutableArrayRef(T *data, size_t length)
+      : ArrayRef<T>(data, length) {}
+
+    /// Construct a MutableArrayRef from a range.
+    MutableArrayRef(T *begin, T *end) : ArrayRef<T>(begin, end) {}
+
+    /// Construct a MutableArrayRef from a SmallVector.
+    /*implicit*/ MutableArrayRef(SmallVectorImpl<T> &Vec)
+    : ArrayRef<T>(Vec) {}
+
+    /// Construct a MutableArrayRef from a std::vector.
+    /*implicit*/ MutableArrayRef(std::vector<T> &Vec)
+    : ArrayRef<T>(Vec) {}
+
+    /// Construct a MutableArrayRef from a std::array.
+    template <size_t N>
+    /*implicit*/ constexpr MutableArrayRef(std::array<T, N> &Arr)
+        : ArrayRef<T>(Arr) {}
+
+    /// Construct a MutableArrayRef from a C array.
+    template <size_t N>
+    /*implicit*/ constexpr MutableArrayRef(T (&Arr)[N]) : ArrayRef<T>(Arr) {}
+
+    T *data() const { return const_cast<T*>(ArrayRef<T>::data()); }
+
+    iterator begin() const { return data(); }
+    iterator end() const { return data() + this->size(); }
+
+    reverse_iterator rbegin() const { return reverse_iterator(end()); }
+    reverse_iterator rend() const { return reverse_iterator(begin()); }
+
+    /// front - Get the first element.
+    T &front() const {
+      assert(!this->empty());
+      return data()[0];
+    }
+
+    /// back - Get the last element.
+    T &back() const {
+      assert(!this->empty());
+      return data()[this->size()-1];
+    }
+
+    /// slice(n, m) - Chop off the first N elements of the array, and keep M
+    /// elements in the array.
+    MutableArrayRef<T> slice(size_t N, size_t M) const {
+      assert(N + M <= this->size() && "Invalid specifier");
+      return MutableArrayRef<T>(this->data() + N, M);
+    }
+
+    /// slice(n) - Chop off the first N elements of the array.
+    MutableArrayRef<T> slice(size_t N) const {
+      return slice(N, this->size() - N);
+    }
+
+    /// \brief Drop the first \p N elements of the array.
+    MutableArrayRef<T> drop_front(size_t N = 1) const {
+      assert(this->size() >= N && "Dropping more elements than exist");
+      return slice(N, this->size() - N);
+    }
+
+    MutableArrayRef<T> drop_back(size_t N = 1) const {
+      assert(this->size() >= N && "Dropping more elements than exist");
+      return slice(0, this->size() - N);
+    }
+
+    /// \brief Return a copy of *this with the first N elements satisfying the
+    /// given predicate removed.
+    template <class PredicateT>
+    MutableArrayRef<T> drop_while(PredicateT Pred) const {
+      return MutableArrayRef<T>(find_if_not(*this, Pred), end());
+    }
+
+    /// \brief Return a copy of *this with the first N elements not satisfying
+    /// the given predicate removed.
+    template <class PredicateT>
+    MutableArrayRef<T> drop_until(PredicateT Pred) const {
+      return MutableArrayRef<T>(find_if(*this, Pred), end());
+    }
+
+    /// \brief Return a copy of *this with only the first \p N elements.
+    MutableArrayRef<T> take_front(size_t N = 1) const {
+      if (N >= this->size())
+        return *this;
+      return drop_back(this->size() - N);
+    }
+
+    /// \brief Return a copy of *this with only the last \p N elements.
+    MutableArrayRef<T> take_back(size_t N = 1) const {
+      if (N >= this->size())
+        return *this;
+      return drop_front(this->size() - N);
+    }
+
+    /// \brief Return the first N elements of this Array that satisfy the given
+    /// predicate.
+    template <class PredicateT>
+    MutableArrayRef<T> take_while(PredicateT Pred) const {
+      return MutableArrayRef<T>(begin(), find_if_not(*this, Pred));
+    }
+
+    /// \brief Return the first N elements of this Array that don't satisfy the
+    /// given predicate.
+    template <class PredicateT>
+    MutableArrayRef<T> take_until(PredicateT Pred) const {
+      return MutableArrayRef<T>(begin(), find_if(*this, Pred));
+    }
+
+    /// @}
+    /// @name Operator Overloads
+    /// @{
+    T &operator[](size_t Index) const {
+      assert(Index < this->size() && "Invalid index!");
+      return data()[Index];
+    }
+  };
+
+  /// This is a MutableArrayRef that owns its array.
+  template <typename T> class OwningArrayRef : public MutableArrayRef<T> {
+  public:
+    OwningArrayRef() = default;
+    OwningArrayRef(size_t Size) : MutableArrayRef<T>(new T[Size], Size) {}
+
+    OwningArrayRef(ArrayRef<T> Data)
+        : MutableArrayRef<T>(new T[Data.size()], Data.size()) {
+      std::copy(Data.begin(), Data.end(), this->begin());
+    }
+
+    OwningArrayRef(OwningArrayRef &&Other) { *this = std::move(Other); }
+
+    OwningArrayRef &operator=(OwningArrayRef &&Other) {
+      delete[] this->data();
+      this->MutableArrayRef<T>::operator=(Other);
+      Other.MutableArrayRef<T>::operator=(MutableArrayRef<T>());
+      return *this;
+    }
+
+    ~OwningArrayRef() { delete[] this->data(); }
+  };
+
+  /// @name ArrayRef Convenience constructors
+  /// @{
+
+  /// Construct an ArrayRef from a single element.
+  template<typename T>
+  ArrayRef<T> makeArrayRef(const T &OneElt) {
+    return OneElt;
+  }
+
+  /// Construct an ArrayRef from a pointer and length.
+  template<typename T>
+  ArrayRef<T> makeArrayRef(const T *data, size_t length) {
+    return ArrayRef<T>(data, length);
+  }
+
+  /// Construct an ArrayRef from a range.
+  template<typename T>
+  ArrayRef<T> makeArrayRef(const T *begin, const T *end) {
+    return ArrayRef<T>(begin, end);
+  }
+
+  /// Construct an ArrayRef from a SmallVector.
+  template <typename T>
+  ArrayRef<T> makeArrayRef(const SmallVectorImpl<T> &Vec) {
+    return Vec;
+  }
+
+  /// Construct an ArrayRef from a SmallVector.
+  template <typename T, unsigned N>
+  ArrayRef<T> makeArrayRef(const SmallVector<T, N> &Vec) {
+    return Vec;
+  }
+
+  /// Construct an ArrayRef from a std::vector.
+  template<typename T>
+  ArrayRef<T> makeArrayRef(const std::vector<T> &Vec) {
+    return Vec;
+  }
+
+  /// Construct an ArrayRef from an ArrayRef (no-op) (const)
+  template <typename T> ArrayRef<T> makeArrayRef(const ArrayRef<T> &Vec) {
+    return Vec;
+  }
+
+  /// Construct an ArrayRef from an ArrayRef (no-op)
+  template <typename T> ArrayRef<T> &makeArrayRef(ArrayRef<T> &Vec) {
+    return Vec;
+  }
+
+  /// Construct an ArrayRef from a C array.
+  template<typename T, size_t N>
+  ArrayRef<T> makeArrayRef(const T (&Arr)[N]) {
+    return ArrayRef<T>(Arr);
+  }
+
+  /// Construct a MutableArrayRef from a single element.
+  template<typename T>
+  MutableArrayRef<T> makeMutableArrayRef(T &OneElt) {
+    return OneElt;
+  }
+
+  /// Construct a MutableArrayRef from a pointer and length.
+  template<typename T>
+  MutableArrayRef<T> makeMutableArrayRef(T *data, size_t length) {
+    return MutableArrayRef<T>(data, length);
+  }
+
+  /// @}
+  /// @name ArrayRef Comparison Operators
+  /// @{
+
+  template<typename T>
+  inline bool operator==(ArrayRef<T> LHS, ArrayRef<T> RHS) {
+    return LHS.equals(RHS);
+  }
+
+  template<typename T>
+  inline bool operator!=(ArrayRef<T> LHS, ArrayRef<T> RHS) {
+    return !(LHS == RHS);
+  }
+
+  /// @}
+
+  // ArrayRefs can be treated like a POD type.
+  template <typename T> struct isPodLike;
+  template <typename T> struct isPodLike<ArrayRef<T>> {
+    static const bool value = true;
+  };
+
+  template <typename T> hash_code hash_value(ArrayRef<T> S) {
+    return hash_combine_range(S.begin(), S.end());
+  }
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ARRAYREF_H
diff --git a/linux-x64/clang/include/llvm/ADT/BitVector.h b/linux-x64/clang/include/llvm/ADT/BitVector.h
new file mode 100644
index 0000000..124c2a8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/BitVector.h
@@ -0,0 +1,929 @@
+//===- llvm/ADT/BitVector.h - Bit vectors -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the BitVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BITVECTOR_H
+#define LLVM_ADT_BITVECTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <utility>
+
+namespace llvm {
+
+/// A forward iterator over the bits that are set.
+/// Iterators are invalidated when resize() or reserve() is called.
+template <typename BitVectorT> class const_set_bits_iterator_impl {
+  const BitVectorT &Parent;
+  int Current = 0;
+
+  void advance() {
+    assert(Current != -1 && "Trying to advance past end.");
+    Current = Parent.find_next(Current);
+  }
+
+public:
+  const_set_bits_iterator_impl(const BitVectorT &Parent, int Current)
+      : Parent(Parent), Current(Current) {}
+  explicit const_set_bits_iterator_impl(const BitVectorT &Parent)
+      : const_set_bits_iterator_impl(Parent, Parent.find_first()) {}
+  const_set_bits_iterator_impl(const const_set_bits_iterator_impl &) = default;
+
+  const_set_bits_iterator_impl operator++(int) {
+    auto Prev = *this;
+    advance();
+    return Prev;
+  }
+
+  const_set_bits_iterator_impl &operator++() {
+    advance();
+    return *this;
+  }
+
+  unsigned operator*() const { return Current; }
+
+  bool operator==(const const_set_bits_iterator_impl &Other) const {
+    assert(&Parent == &Other.Parent &&
+           "Comparing iterators from different BitVectors");
+    return Current == Other.Current;
+  }
+
+  bool operator!=(const const_set_bits_iterator_impl &Other) const {
+    assert(&Parent == &Other.Parent &&
+           "Comparing iterators from different BitVectors");
+    return Current != Other.Current;
+  }
+};
+
+class BitVector {
+  typedef unsigned long BitWord;
+
+  enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT };
+
+  static_assert(BITWORD_SIZE == 64 || BITWORD_SIZE == 32,
+                "Unsupported word size");
+
+  MutableArrayRef<BitWord> Bits; // Actual bits.
+  unsigned Size;                 // Size of bitvector in bits.
+
+public:
+  typedef unsigned size_type;
+  // Encapsulation of a single bit.
+  class reference {
+    friend class BitVector;
+
+    BitWord *WordRef;
+    unsigned BitPos;
+
+  public:
+    reference(BitVector &b, unsigned Idx) {
+      WordRef = &b.Bits[Idx / BITWORD_SIZE];
+      BitPos = Idx % BITWORD_SIZE;
+    }
+
+    reference() = delete;
+    reference(const reference&) = default;
+
+    reference &operator=(reference t) {
+      *this = bool(t);
+      return *this;
+    }
+
+    reference& operator=(bool t) {
+      if (t)
+        *WordRef |= BitWord(1) << BitPos;
+      else
+        *WordRef &= ~(BitWord(1) << BitPos);
+      return *this;
+    }
+
+    operator bool() const {
+      return ((*WordRef) & (BitWord(1) << BitPos)) != 0;
+    }
+  };
+
+  typedef const_set_bits_iterator_impl<BitVector> const_set_bits_iterator;
+  typedef const_set_bits_iterator set_iterator;
+
+  const_set_bits_iterator set_bits_begin() const {
+    return const_set_bits_iterator(*this);
+  }
+  const_set_bits_iterator set_bits_end() const {
+    return const_set_bits_iterator(*this, -1);
+  }
+  iterator_range<const_set_bits_iterator> set_bits() const {
+    return make_range(set_bits_begin(), set_bits_end());
+  }
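+
+  // Illustrative sketch (editorial, not part of the original header): iterate
+  // only the set bits.
+  //   BitVector BV(8);
+  //   BV.set(1);
+  //   BV.set(5);
+  //   for (unsigned I : BV.set_bits()) { /* I == 1, then 5 */ }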
+
+  /// BitVector default ctor - Creates an empty bitvector.
+  BitVector() : Size(0) {}
+
+  /// BitVector ctor - Creates a bitvector of specified number of bits. All
+  /// bits are initialized to the specified value.
+  explicit BitVector(unsigned s, bool t = false) : Size(s) {
+    size_t Capacity = NumBitWords(s);
+    Bits = allocate(Capacity);
+    init_words(Bits, t);
+    if (t)
+      clear_unused_bits();
+  }
+
+  /// BitVector copy ctor.
+  BitVector(const BitVector &RHS) : Size(RHS.size()) {
+    if (Size == 0) {
+      Bits = MutableArrayRef<BitWord>();
+      return;
+    }
+
+    size_t Capacity = NumBitWords(RHS.size());
+    Bits = allocate(Capacity);
+    std::memcpy(Bits.data(), RHS.Bits.data(), Capacity * sizeof(BitWord));
+  }
+
+  BitVector(BitVector &&RHS) : Bits(RHS.Bits), Size(RHS.Size) {
+    RHS.Bits = MutableArrayRef<BitWord>();
+    RHS.Size = 0;
+  }
+
+  ~BitVector() { std::free(Bits.data()); }
+
+  /// empty - Tests whether there are no bits in this bitvector.
+  bool empty() const { return Size == 0; }
+
+  /// size - Returns the number of bits in this bitvector.
+  size_type size() const { return Size; }
+
+  /// count - Returns the number of bits which are set.
+  size_type count() const {
+    unsigned NumBits = 0;
+    for (unsigned i = 0; i < NumBitWords(size()); ++i)
+      NumBits += countPopulation(Bits[i]);
+    return NumBits;
+  }
+
+  /// any - Returns true if any bit is set.
+  bool any() const {
+    for (unsigned i = 0; i < NumBitWords(size()); ++i)
+      if (Bits[i] != 0)
+        return true;
+    return false;
+  }
+
+  /// all - Returns true if all bits are set.
+  bool all() const {
+    for (unsigned i = 0; i < Size / BITWORD_SIZE; ++i)
+      if (Bits[i] != ~0UL)
+        return false;
+
+    // If bits remain check that they are ones. The unused bits are always zero.
+    if (unsigned Remainder = Size % BITWORD_SIZE)
+      return Bits[Size / BITWORD_SIZE] == (1UL << Remainder) - 1;
+
+    return true;
+  }
+
+  /// none - Returns true if none of the bits are set.
+  bool none() const {
+    return !any();
+  }
+
+  /// find_first_in - Returns the index of the first set bit in the range
+  /// [Begin, End).  Returns -1 if all bits in the range are unset.
+  int find_first_in(unsigned Begin, unsigned End) const {
+    assert(Begin <= End && End <= Size);
+    if (Begin == End)
+      return -1;
+
+    unsigned FirstWord = Begin / BITWORD_SIZE;
+    unsigned LastWord = (End - 1) / BITWORD_SIZE;
+
+    // Check subsequent words.
+    for (unsigned i = FirstWord; i <= LastWord; ++i) {
+      BitWord Copy = Bits[i];
+
+      if (i == FirstWord) {
+        unsigned FirstBit = Begin % BITWORD_SIZE;
+        Copy &= maskTrailingZeros<BitWord>(FirstBit);
+      }
+
+      if (i == LastWord) {
+        unsigned LastBit = (End - 1) % BITWORD_SIZE;
+        Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
+      }
+      if (Copy != 0)
+        return i * BITWORD_SIZE + countTrailingZeros(Copy);
+    }
+    return -1;
+  }
+
+  /// find_last_in - Returns the index of the last set bit in the range
+  /// [Begin, End).  Returns -1 if all bits in the range are unset.
+  int find_last_in(unsigned Begin, unsigned End) const {
+    assert(Begin <= End && End <= Size);
+    if (Begin == End)
+      return -1;
+
+    unsigned LastWord = (End - 1) / BITWORD_SIZE;
+    unsigned FirstWord = Begin / BITWORD_SIZE;
+
+    for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
+      unsigned CurrentWord = i - 1;
+
+      BitWord Copy = Bits[CurrentWord];
+      if (CurrentWord == LastWord) {
+        unsigned LastBit = (End - 1) % BITWORD_SIZE;
+        Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
+      }
+
+      if (CurrentWord == FirstWord) {
+        unsigned FirstBit = Begin % BITWORD_SIZE;
+        Copy &= maskTrailingZeros<BitWord>(FirstBit);
+      }
+
+      if (Copy != 0)
+        return (CurrentWord + 1) * BITWORD_SIZE - countLeadingZeros(Copy) - 1;
+    }
+
+    return -1;
+  }
+
+  /// find_first_unset_in - Returns the index of the first unset bit in the
+  /// range [Begin, End).  Returns -1 if all bits in the range are set.
+  int find_first_unset_in(unsigned Begin, unsigned End) const {
+    assert(Begin <= End && End <= Size);
+    if (Begin == End)
+      return -1;
+
+    unsigned FirstWord = Begin / BITWORD_SIZE;
+    unsigned LastWord = (End - 1) / BITWORD_SIZE;
+
+    // Check subsequent words.
+    for (unsigned i = FirstWord; i <= LastWord; ++i) {
+      BitWord Copy = Bits[i];
+
+      if (i == FirstWord) {
+        unsigned FirstBit = Begin % BITWORD_SIZE;
+        Copy |= maskTrailingOnes<BitWord>(FirstBit);
+      }
+
+      if (i == LastWord) {
+        unsigned LastBit = (End - 1) % BITWORD_SIZE;
+        Copy |= maskTrailingZeros<BitWord>(LastBit + 1);
+      }
+      if (Copy != ~0UL) {
+        unsigned Result = i * BITWORD_SIZE + countTrailingOnes(Copy);
+        return Result < size() ? Result : -1;
+      }
+    }
+    return -1;
+  }
+
+  /// find_last_unset_in - Returns the index of the last unset bit in the
+  /// range [Begin, End).  Returns -1 if all bits in the range are set.
+  int find_last_unset_in(unsigned Begin, unsigned End) const {
+    assert(Begin <= End && End <= Size);
+    if (Begin == End)
+      return -1;
+
+    unsigned LastWord = (End - 1) / BITWORD_SIZE;
+    unsigned FirstWord = Begin / BITWORD_SIZE;
+
+    for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
+      unsigned CurrentWord = i - 1;
+
+      BitWord Copy = Bits[CurrentWord];
+      if (CurrentWord == LastWord) {
+        unsigned LastBit = (End - 1) % BITWORD_SIZE;
+        Copy |= maskTrailingZeros<BitWord>(LastBit + 1);
+      }
+
+      if (CurrentWord == FirstWord) {
+        unsigned FirstBit = Begin % BITWORD_SIZE;
+        Copy |= maskTrailingOnes<BitWord>(FirstBit);
+      }
+
+      if (Copy != ~0UL) {
+        unsigned Result =
+            (CurrentWord + 1) * BITWORD_SIZE - countLeadingOnes(Copy) - 1;
+        return Result < Size ? Result : -1;
+      }
+    }
+    return -1;
+  }
+
+  /// find_first - Returns the index of the first set bit, -1 if none
+  /// of the bits are set.
+  int find_first() const { return find_first_in(0, Size); }
+
+  /// find_last - Returns the index of the last set bit, -1 if none of the bits
+  /// are set.
+  int find_last() const { return find_last_in(0, Size); }
+
+  /// find_next - Returns the index of the next set bit following the
+  /// "Prev" bit. Returns -1 if the next set bit is not found.
+  int find_next(unsigned Prev) const { return find_first_in(Prev + 1, Size); }
+
+  /// find_prev - Returns the index of the first set bit that precedes the
+  /// bit at \p PriorTo.  Returns -1 if all previous bits are unset.
+  int find_prev(unsigned PriorTo) const { return find_last_in(0, PriorTo); }
+
+  /// find_first_unset - Returns the index of the first unset bit, -1 if all
+  /// of the bits are set.
+  int find_first_unset() const { return find_first_unset_in(0, Size); }
+
+  /// find_next_unset - Returns the index of the next unset bit following the
+  /// "Prev" bit.  Returns -1 if all remaining bits are set.
+  int find_next_unset(unsigned Prev) const {
+    return find_first_unset_in(Prev + 1, Size);
+  }
+
+  /// find_last_unset - Returns the index of the last unset bit, -1 if all of
+  /// the bits are set.
+  int find_last_unset() const { return find_last_unset_in(0, Size); }
+
+  /// find_prev_unset - Returns the index of the first unset bit that precedes
+  /// the bit at \p PriorTo.  Returns -1 if all previous bits are set.
+  int find_prev_unset(unsigned PriorTo) const {
+    return find_last_unset_in(0, PriorTo);
+  }
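+
+  // A typical loop over the set bits, as a usage sketch (BV being any
+  // BitVector):
+  //
+  //   for (int Idx = BV.find_first(); Idx != -1; Idx = BV.find_next(Idx)) {
+  //     ... // Idx is the index of a set bit
+  //   }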
+
+  /// clear - Removes all bits from the bitvector. Does not change capacity.
+  void clear() {
+    Size = 0;
+  }
+
+  /// resize - Grow or shrink the bitvector.
+  void resize(unsigned N, bool t = false) {
+    if (N > getBitCapacity()) {
+      unsigned OldCapacity = Bits.size();
+      grow(N);
+      init_words(Bits.drop_front(OldCapacity), t);
+    }
+
+    // Set any old unused bits that are now included in the BitVector. This
+    // may set bits that are not included in the new vector, but we will clear
+    // them back out below.
+    if (N > Size)
+      set_unused_bits(t);
+
+    // Update the size, and clear out any bits that are now unused
+    unsigned OldSize = Size;
+    Size = N;
+    if (t || N < OldSize)
+      clear_unused_bits();
+  }
+
+  void reserve(unsigned N) {
+    if (N > getBitCapacity())
+      grow(N);
+  }
+
+  // Set, reset, flip
+  BitVector &set() {
+    init_words(Bits, true);
+    clear_unused_bits();
+    return *this;
+  }
+
+  BitVector &set(unsigned Idx) {
+    assert(Bits.data() && "Bits never allocated");
+    Bits[Idx / BITWORD_SIZE] |= BitWord(1) << (Idx % BITWORD_SIZE);
+    return *this;
+  }
+
+  /// set - Efficiently set a range of bits in [I, E)
+  BitVector &set(unsigned I, unsigned E) {
+    assert(I <= E && "Attempted to set backwards range!");
+    assert(E <= size() && "Attempted to set out-of-bounds range!");
+
+    if (I == E) return *this;
+
+    if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
+      BitWord EMask = 1UL << (E % BITWORD_SIZE);
+      BitWord IMask = 1UL << (I % BITWORD_SIZE);
+      BitWord Mask = EMask - IMask;
+      Bits[I / BITWORD_SIZE] |= Mask;
+      return *this;
+    }
+
+    BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
+    Bits[I / BITWORD_SIZE] |= PrefixMask;
+    I = alignTo(I, BITWORD_SIZE);
+
+    for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
+      Bits[I / BITWORD_SIZE] = ~0UL;
+
+    BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
+    if (I < E)
+      Bits[I / BITWORD_SIZE] |= PostfixMask;
+
+    return *this;
+  }
+
+  BitVector &reset() {
+    init_words(Bits, false);
+    return *this;
+  }
+
+  BitVector &reset(unsigned Idx) {
+    Bits[Idx / BITWORD_SIZE] &= ~(BitWord(1) << (Idx % BITWORD_SIZE));
+    return *this;
+  }
+
+  /// reset - Efficiently reset a range of bits in [I, E)
+  BitVector &reset(unsigned I, unsigned E) {
+    assert(I <= E && "Attempted to reset backwards range!");
+    assert(E <= size() && "Attempted to reset out-of-bounds range!");
+
+    if (I == E) return *this;
+
+    if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
+      BitWord EMask = 1UL << (E % BITWORD_SIZE);
+      BitWord IMask = 1UL << (I % BITWORD_SIZE);
+      BitWord Mask = EMask - IMask;
+      Bits[I / BITWORD_SIZE] &= ~Mask;
+      return *this;
+    }
+
+    BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
+    Bits[I / BITWORD_SIZE] &= ~PrefixMask;
+    I = alignTo(I, BITWORD_SIZE);
+
+    for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
+      Bits[I / BITWORD_SIZE] = 0UL;
+
+    BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
+    if (I < E)
+      Bits[I / BITWORD_SIZE] &= ~PostfixMask;
+
+    return *this;
+  }
+
+  BitVector &flip() {
+    for (unsigned i = 0; i < NumBitWords(size()); ++i)
+      Bits[i] = ~Bits[i];
+    clear_unused_bits();
+    return *this;
+  }
+
+  BitVector &flip(unsigned Idx) {
+    Bits[Idx / BITWORD_SIZE] ^= BitWord(1) << (Idx % BITWORD_SIZE);
+    return *this;
+  }
+
+  // Indexing.
+  reference operator[](unsigned Idx) {
+    assert (Idx < Size && "Out-of-bounds Bit access.");
+    return reference(*this, Idx);
+  }
+
+  bool operator[](unsigned Idx) const {
+    assert (Idx < Size && "Out-of-bounds Bit access.");
+    BitWord Mask = BitWord(1) << (Idx % BITWORD_SIZE);
+    return (Bits[Idx / BITWORD_SIZE] & Mask) != 0;
+  }
+
+  bool test(unsigned Idx) const {
+    return (*this)[Idx];
+  }
+
+  /// Test if any common bits are set.
+  bool anyCommon(const BitVector &RHS) const {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords  = NumBitWords(RHS.size());
+    for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
+      if (Bits[i] & RHS.Bits[i])
+        return true;
+    return false;
+  }
+
+  // Comparison operators.
+  bool operator==(const BitVector &RHS) const {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords  = NumBitWords(RHS.size());
+    unsigned i;
+    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+      if (Bits[i] != RHS.Bits[i])
+        return false;
+
+    // Verify that any extra words are all zeros.
+    if (i != ThisWords) {
+      for (; i != ThisWords; ++i)
+        if (Bits[i])
+          return false;
+    } else if (i != RHSWords) {
+      for (; i != RHSWords; ++i)
+        if (RHS.Bits[i])
+          return false;
+    }
+    return true;
+  }
+
+  bool operator!=(const BitVector &RHS) const {
+    return !(*this == RHS);
+  }
+
+  /// Intersection, union, disjoint union.
+  BitVector &operator&=(const BitVector &RHS) {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords  = NumBitWords(RHS.size());
+    unsigned i;
+    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+      Bits[i] &= RHS.Bits[i];
+
+    // Any bits that are just in this bitvector become zero, because they aren't
+    // in the RHS bit vector.  Any words only in RHS are ignored because they
+    // are already zero in the LHS.
+    for (; i != ThisWords; ++i)
+      Bits[i] = 0;
+
+    return *this;
+  }
+
+  /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
+  BitVector &reset(const BitVector &RHS) {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords  = NumBitWords(RHS.size());
+    unsigned i;
+    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+      Bits[i] &= ~RHS.Bits[i];
+    return *this;
+  }
+
+  /// test - Check if (This - RHS) is non-zero. Returns true if any bit that
+  /// is set in *this is not set in RHS; equivalent to calling reset(RHS)
+  /// followed by any().
+  bool test(const BitVector &RHS) const {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords  = NumBitWords(RHS.size());
+    unsigned i;
+    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+      if ((Bits[i] & ~RHS.Bits[i]) != 0)
+        return true;
+
+    for (; i != ThisWords ; ++i)
+      if (Bits[i] != 0)
+        return true;
+
+    return false;
+  }
+
+  BitVector &operator|=(const BitVector &RHS) {
+    if (size() < RHS.size())
+      resize(RHS.size());
+    for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
+      Bits[i] |= RHS.Bits[i];
+    return *this;
+  }
+
+  BitVector &operator^=(const BitVector &RHS) {
+    if (size() < RHS.size())
+      resize(RHS.size());
+    for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
+      Bits[i] ^= RHS.Bits[i];
+    return *this;
+  }
+
+  BitVector &operator>>=(unsigned N) {
+    assert(N <= Size);
+    if (LLVM_UNLIKELY(empty() || N == 0))
+      return *this;
+
+    unsigned NumWords = NumBitWords(Size);
+    assert(NumWords >= 1);
+
+    wordShr(N / BITWORD_SIZE);
+
+    unsigned BitDistance = N % BITWORD_SIZE;
+    if (BitDistance == 0)
+      return *this;
+
+    // When the shift size is not a multiple of the word size, then we have
+    // a tricky situation where each word in succession needs to extract some
+    // of the bits from the next word and OR them into this word while
+    // shifting this word to make room for the new bits.  This has to be done
+    // for every word in the array.
+
+    // Since we're shifting each word right, some bits will fall off the end
+    // of each word to the right, and empty space will be created on the left.
+    // The low bits of the first word are discarded permanently, so starting
+    // at the beginning, work forwards shifting each word to the right, and
+    // OR'ing in the bits from the low end of the next word into the high
+    // end of the current word.
+
+    // Example:
+    //   Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting right
+    //   by 4 bits.
+    // Step 1: Word[0] >>= 4           ; 0x0ABBCCDD
+    // Step 2: Word[0] |= 0x10000000   ; 0x1ABBCCDD
+    // Step 3: Word[1] >>= 4           ; 0x0EEFF001
+    // Step 4: Word[1] |= 0x50000000   ; 0x5EEFF001
+    // Step 5: Word[2] >>= 4           ; 0x02334455
+    // Result: { 0x1ABBCCDD, 0x5EEFF001, 0x02334455 }
+    const BitWord Mask = maskTrailingOnes<BitWord>(BitDistance);
+    const unsigned LSH = BITWORD_SIZE - BitDistance;
+
+    for (unsigned I = 0; I < NumWords - 1; ++I) {
+      Bits[I] >>= BitDistance;
+      Bits[I] |= (Bits[I + 1] & Mask) << LSH;
+    }
+
+    Bits[NumWords - 1] >>= BitDistance;
+
+    return *this;
+  }
+
+  BitVector &operator<<=(unsigned N) {
+    assert(N <= Size);
+    if (LLVM_UNLIKELY(empty() || N == 0))
+      return *this;
+
+    unsigned NumWords = NumBitWords(Size);
+    assert(NumWords >= 1);
+
+    wordShl(N / BITWORD_SIZE);
+
+    unsigned BitDistance = N % BITWORD_SIZE;
+    if (BitDistance == 0)
+      return *this;
+
+    // When the shift size is not a multiple of the word size, then we have
+    // a tricky situation where each word in succession needs to extract some
+    // of the bits from the previous word and OR them into this word while
+    // shifting this word to make room for the new bits.  This has to be done
+    // for every word in the array.  This is similar to the algorithm outlined
+    // in operator>>=, but backwards.
+
+    // Since we're shifting each word left, some bits will fall off the end
+    // of each word to the left, and empty space will be created on the right.
+    // The high bits of the last word are discarded permanently, so starting
+    // at the end, work backwards shifting each word to the left, and OR'ing
+    // in the bits from the high end of the previous word into the low end
+    // of the current word.
+
+    // Example:
+    //   Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting left
+    //   by 4 bits.
+    // Step 1: Word[2] <<= 4           ; 0x23344550
+    // Step 2: Word[2] |= 0x0000000E   ; 0x2334455E
+    // Step 3: Word[1] <<= 4           ; 0xEFF00110
+    // Step 4: Word[1] |= 0x0000000A   ; 0xEFF0011A
+    // Step 5: Word[0] <<= 4           ; 0xABBCCDD0
+    // Result: { 0xABBCCDD0, 0xEFF0011A, 0x2334455E }
+    const BitWord Mask = maskLeadingOnes<BitWord>(BitDistance);
+    const unsigned RSH = BITWORD_SIZE - BitDistance;
+
+    for (int I = NumWords - 1; I > 0; --I) {
+      Bits[I] <<= BitDistance;
+      Bits[I] |= (Bits[I - 1] & Mask) >> RSH;
+    }
+    Bits[0] <<= BitDistance;
+    clear_unused_bits();
+
+    return *this;
+  }
+
+  // Assignment operator.
+  const BitVector &operator=(const BitVector &RHS) {
+    if (this == &RHS) return *this;
+
+    Size = RHS.size();
+    unsigned RHSWords = NumBitWords(Size);
+    if (Size <= getBitCapacity()) {
+      if (Size)
+        std::memcpy(Bits.data(), RHS.Bits.data(), RHSWords * sizeof(BitWord));
+      clear_unused_bits();
+      return *this;
+    }
+
+    // Grow the bitvector to have enough elements.
+    unsigned NewCapacity = RHSWords;
+    assert(NewCapacity > 0 && "negative capacity?");
+    auto NewBits = allocate(NewCapacity);
+    std::memcpy(NewBits.data(), RHS.Bits.data(), NewCapacity * sizeof(BitWord));
+
+    // Destroy the old bits.
+    std::free(Bits.data());
+    Bits = NewBits;
+
+    return *this;
+  }
+
+  const BitVector &operator=(BitVector &&RHS) {
+    if (this == &RHS) return *this;
+
+    std::free(Bits.data());
+    Bits = RHS.Bits;
+    Size = RHS.Size;
+
+    RHS.Bits = MutableArrayRef<BitWord>();
+    RHS.Size = 0;
+
+    return *this;
+  }
+
+  void swap(BitVector &RHS) {
+    std::swap(Bits, RHS.Bits);
+    std::swap(Size, RHS.Size);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Portable bit mask operations.
+  //===--------------------------------------------------------------------===//
+  //
+  // These methods all operate on arrays of uint32_t, each holding 32 bits. The
+  // fixed word size makes it easier to work with literal bit vector constants
+  // in portable code.
+  //
+  // The LSB in each word is the lowest numbered bit.  The size of a portable
+  // bit mask is always a whole multiple of 32 bits.  If no bit mask size is
+  // given, the bit mask is assumed to cover the entire BitVector.
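+  //
+  // For example, a sketch:
+  //
+  //   BitVector BV(64);
+  //   static const uint32_t Mask[2] = {0x0000000F, 0x80000000};
+  //   BV.setBitsInMask(Mask, 2);   // sets bits 0-3 and bit 63
+  //   BV.clearBitsInMask(Mask, 2); // clears the same bits again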
+
+  /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
+  /// This computes "*this |= Mask".
+  void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    applyMask<true, false>(Mask, MaskWords);
+  }
+
+  /// clearBitsInMask - Clear any bits in this vector that are set in Mask.
+  /// Don't resize. This computes "*this &= ~Mask".
+  void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    applyMask<false, false>(Mask, MaskWords);
+  }
+
+  /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
+  /// Don't resize.  This computes "*this |= ~Mask".
+  void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    applyMask<true, true>(Mask, MaskWords);
+  }
+
+  /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
+  /// Don't resize.  This computes "*this &= Mask".
+  void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    applyMask<false, true>(Mask, MaskWords);
+  }
+
+private:
+  /// \brief Perform a logical left shift of \p Count words by moving everything
+  /// \p Count words to the right in memory.
+  ///
+  /// While confusing, words are stored from least significant at Bits[0] to
+  /// most significant at Bits[NumWords-1].  A logical shift left, however,
+  /// moves the current least significant bit to a higher logical index, and
+  /// fills the previous least significant bits with 0.  Thus, we actually
+  /// need to move the bytes of the memory to the right, not to the left.
+  /// Example:
+  ///   Words = [0xBBBBAAAA, 0xDDDDFFFF, 0x00000000, 0xDDDD0000]
+  /// represents a BitVector where 0xBBBBAAAA contains the least significant
+  /// bits.  So if we want to shift the BitVector left by 2 words, we need to
+  /// turn this into 0x00000000 0x00000000 0xBBBBAAAA 0xDDDDFFFF by using a
+  /// memmove which moves right, not left.
+  void wordShl(uint32_t Count) {
+    if (Count == 0)
+      return;
+
+    uint32_t NumWords = NumBitWords(Size);
+
+    auto Src = Bits.take_front(NumWords).drop_back(Count);
+    auto Dest = Bits.take_front(NumWords).drop_front(Count);
+
+    // Since we always move Word-sized chunks of data with src and dest both
+    // aligned to a word-boundary, we don't need to worry about endianness
+    // here.
+    std::memmove(Dest.begin(), Src.begin(), Dest.size() * sizeof(BitWord));
+    std::memset(Bits.data(), 0, Count * sizeof(BitWord));
+    clear_unused_bits();
+  }
+
+  /// \brief Perform a logical right shift of \p Count words by moving those
+  /// words to the left in memory.  See wordShl for more information.
+  ///
+  void wordShr(uint32_t Count) {
+    if (Count == 0)
+      return;
+
+    uint32_t NumWords = NumBitWords(Size);
+
+    auto Src = Bits.take_front(NumWords).drop_front(Count);
+    auto Dest = Bits.take_front(NumWords).drop_back(Count);
+    assert(Dest.size() == Src.size());
+
+    std::memmove(Dest.begin(), Src.begin(), Dest.size() * sizeof(BitWord));
+    std::memset(Dest.end(), 0, Count * sizeof(BitWord));
+  }
+
+  MutableArrayRef<BitWord> allocate(size_t NumWords) {
+    BitWord *RawBits = static_cast<BitWord *>(
+        safe_malloc(NumWords * sizeof(BitWord)));
+    return MutableArrayRef<BitWord>(RawBits, NumWords);
+  }
+
+  int next_unset_in_word(int WordIndex, BitWord Word) const {
+    unsigned Result = WordIndex * BITWORD_SIZE + countTrailingOnes(Word);
+    return Result < size() ? Result : -1;
+  }
+
+  unsigned NumBitWords(unsigned S) const {
+    return (S + BITWORD_SIZE-1) / BITWORD_SIZE;
+  }
+
+  // Set the unused bits in the high words.
+  void set_unused_bits(bool t = true) {
+    //  Set high words first.
+    unsigned UsedWords = NumBitWords(Size);
+    if (Bits.size() > UsedWords)
+      init_words(Bits.drop_front(UsedWords), t);
+
+    //  Then set any stray high bits of the last used word.
+    unsigned ExtraBits = Size % BITWORD_SIZE;
+    if (ExtraBits) {
+      BitWord ExtraBitMask = ~0UL << ExtraBits;
+      if (t)
+        Bits[UsedWords-1] |= ExtraBitMask;
+      else
+        Bits[UsedWords-1] &= ~ExtraBitMask;
+    }
+  }
+
+  // Clear the unused bits in the high words.
+  void clear_unused_bits() {
+    set_unused_bits(false);
+  }
+
+  void grow(unsigned NewSize) {
+    size_t NewCapacity = std::max<size_t>(NumBitWords(NewSize), Bits.size() * 2);
+    assert(NewCapacity > 0 && "realloc-ing zero space");
+    BitWord *NewBits = static_cast<BitWord *>(
+        safe_realloc(Bits.data(), NewCapacity * sizeof(BitWord)));
+    Bits = MutableArrayRef<BitWord>(NewBits, NewCapacity);
+    clear_unused_bits();
+  }
+
+  void init_words(MutableArrayRef<BitWord> B, bool t) {
+    if (B.size() > 0)
+      memset(B.data(), 0 - (int)t, B.size() * sizeof(BitWord));
+  }
+
+  template<bool AddBits, bool InvertMask>
+  void applyMask(const uint32_t *Mask, unsigned MaskWords) {
+    static_assert(BITWORD_SIZE % 32 == 0, "Unsupported BitWord size.");
+    MaskWords = std::min(MaskWords, (size() + 31) / 32);
+    const unsigned Scale = BITWORD_SIZE / 32;
+    unsigned i;
+    for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) {
+      BitWord BW = Bits[i];
+      // This inner loop should unroll completely when BITWORD_SIZE > 32.
+      for (unsigned b = 0; b != BITWORD_SIZE; b += 32) {
+        uint32_t M = *Mask++;
+        if (InvertMask) M = ~M;
+        if (AddBits) BW |=   BitWord(M) << b;
+        else         BW &= ~(BitWord(M) << b);
+      }
+      Bits[i] = BW;
+    }
+    for (unsigned b = 0; MaskWords; b += 32, --MaskWords) {
+      uint32_t M = *Mask++;
+      if (InvertMask) M = ~M;
+      if (AddBits) Bits[i] |=   BitWord(M) << b;
+      else         Bits[i] &= ~(BitWord(M) << b);
+    }
+    if (AddBits)
+      clear_unused_bits();
+  }
+
+public:
+  /// Return the size (in bytes) of the bit vector.
+  size_t getMemorySize() const { return Bits.size() * sizeof(BitWord); }
+  size_t getBitCapacity() const { return Bits.size() * BITWORD_SIZE; }
+};
+
+inline size_t capacity_in_bytes(const BitVector &X) {
+  return X.getMemorySize();
+}
+
+} // end namespace llvm
+
+namespace std {
+  /// Implement std::swap in terms of BitVector swap.
+  inline void
+  swap(llvm::BitVector &LHS, llvm::BitVector &RHS) {
+    LHS.swap(RHS);
+  }
+} // end namespace std
+
+#endif // LLVM_ADT_BITVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/BitmaskEnum.h b/linux-x64/clang/include/llvm/ADT/BitmaskEnum.h
new file mode 100644
index 0000000..18c6ba5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/BitmaskEnum.h
@@ -0,0 +1,153 @@
+//===-- llvm/ADT/BitmaskEnum.h ----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BITMASKENUM_H
+#define LLVM_ADT_BITMASKENUM_H
+
+#include <cassert>
+#include <type_traits>
+#include <utility>
+
+#include "llvm/Support/MathExtras.h"
+
+/// LLVM_MARK_AS_BITMASK_ENUM lets you opt in an individual enum type so you can
+/// perform bitwise operations on it without putting static_cast everywhere.
+///
+/// \code
+///   enum MyEnum {
+///     E1 = 1, E2 = 2, E3 = 4, E4 = 8,
+///     LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ E4)
+///   };
+///
+///   void Foo() {
+///     MyEnum A = (E1 | E2) & E3 ^ ~E4; // Look, ma: No static_cast!
+///   }
+/// \endcode
+///
+/// Normally when you do a bitwise operation on an enum value, you get back an
+/// instance of the underlying type (e.g. int).  But using this macro, bitwise
+/// ops on your enum will return you back instances of the enum.  This is
+/// particularly useful for enums which represent a combination of flags.
+///
+/// The parameter to LLVM_MARK_AS_BITMASK_ENUM should be the largest individual
+/// value in your enum.
+///
+/// All of the enum's values must be non-negative.
+#define LLVM_MARK_AS_BITMASK_ENUM(LargestValue)                                \
+  LLVM_BITMASK_LARGEST_ENUMERATOR = LargestValue
+
+/// LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE() pulls the operator overloads used
+/// by LLVM_MARK_AS_BITMASK_ENUM into the current namespace.
+///
+/// Suppose you have an enum foo::bar::MyEnum.  Before using
+/// LLVM_MARK_AS_BITMASK_ENUM on MyEnum, you must put
+/// LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE() somewhere inside namespace foo or
+/// namespace foo::bar.  This allows the relevant operator overloads to be found
+/// by ADL.
+///
+/// You don't need to use this macro in namespace llvm; it's done at the bottom
+/// of this file.
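+///
+/// For example, a sketch:
+///
+/// \code
+///   namespace foo {
+///   namespace bar {
+///   enum MyEnum { E1 = 1, E2 = 2, LLVM_MARK_AS_BITMASK_ENUM(E2) };
+///   LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+///   } // namespace bar
+///   } // namespace foo
+///
+///   foo::bar::MyEnum E = foo::bar::E1 | foo::bar::E2; // found via ADL
+/// \endcode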
+#define LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE()                               \
+  using ::llvm::BitmaskEnumDetail::operator~;                                  \
+  using ::llvm::BitmaskEnumDetail::operator|;                                  \
+  using ::llvm::BitmaskEnumDetail::operator&;                                  \
+  using ::llvm::BitmaskEnumDetail::operator^;                                  \
+  using ::llvm::BitmaskEnumDetail::operator|=;                                 \
+  using ::llvm::BitmaskEnumDetail::operator&=;                                 \
+  /* Force a semicolon at the end of this macro. */                            \
+  using ::llvm::BitmaskEnumDetail::operator^=
+
+namespace llvm {
+
+/// Traits class to determine whether an enum has a
+/// LLVM_BITMASK_LARGEST_ENUMERATOR enumerator.
+template <typename E, typename Enable = void>
+struct is_bitmask_enum : std::false_type {};
+
+template <typename E>
+struct is_bitmask_enum<
+    E, typename std::enable_if<sizeof(E::LLVM_BITMASK_LARGEST_ENUMERATOR) >=
+                               0>::type> : std::true_type {};
+namespace BitmaskEnumDetail {
+
+/// Get a bitmask with 1s in all places up to the high-order bit of E's largest
+/// value.
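+/// For example, if the largest enumerator is 8 (0b1000), the mask is
+/// NextPowerOf2(8) - 1 == 15 (0b1111).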
+template <typename E> typename std::underlying_type<E>::type Mask() {
+  // On overflow, NextPowerOf2 returns zero with the type uint64_t, so
+  // subtracting 1 gives us the mask with all bits set, like we want.
+  return NextPowerOf2(static_cast<typename std::underlying_type<E>::type>(
+             E::LLVM_BITMASK_LARGEST_ENUMERATOR)) -
+         1;
+}
+
+/// Check that Val is in range for E, and return Val cast to E's underlying
+/// type.
+template <typename E> typename std::underlying_type<E>::type Underlying(E Val) {
+  auto U = static_cast<typename std::underlying_type<E>::type>(Val);
+  assert(U >= 0 && "Negative enum values are not allowed.");
+  assert(U <= Mask<E>() && "Enum value too large (or largest val too small?)");
+  return U;
+}
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E operator~(E Val) {
+  return static_cast<E>(~Underlying(Val) & Mask<E>());
+}
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E operator|(E LHS, E RHS) {
+  return static_cast<E>(Underlying(LHS) | Underlying(RHS));
+}
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E operator&(E LHS, E RHS) {
+  return static_cast<E>(Underlying(LHS) & Underlying(RHS));
+}
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E operator^(E LHS, E RHS) {
+  return static_cast<E>(Underlying(LHS) ^ Underlying(RHS));
+}
+
+// |=, &=, and ^= return a reference to LHS, to match the behavior of the
+// operators on builtin types.
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E &operator|=(E &LHS, E RHS) {
+  LHS = LHS | RHS;
+  return LHS;
+}
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E &operator&=(E &LHS, E RHS) {
+  LHS = LHS & RHS;
+  return LHS;
+}
+
+template <typename E,
+          typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
+E &operator^=(E &LHS, E RHS) {
+  LHS = LHS ^ RHS;
+  return LHS;
+}
+
+} // namespace BitmaskEnumDetail
+
+// Enable bitmask enums in namespace ::llvm and all nested namespaces.
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/BreadthFirstIterator.h b/linux-x64/clang/include/llvm/ADT/BreadthFirstIterator.h
new file mode 100644
index 0000000..6bc63c2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/BreadthFirstIterator.h
@@ -0,0 +1,163 @@
+//===- llvm/ADT/BreadthFirstIterator.h - Breadth First iterator -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file builds on the ADT/GraphTraits.h file to build a generic breadth
+// first graph iterator.  This file exposes the following functions/types:
+//
+// bf_begin/bf_end/bf_iterator
+//   * Normal breadth-first iteration - visit a graph level-by-level.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BREADTHFIRSTITERATOR_H
+#define LLVM_ADT_BREADTHFIRSTITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include <iterator>
+#include <queue>
+#include <utility>
+
+namespace llvm {
+
+// bf_iterator_storage - A private class which is used to figure out where to
+// store the visited set. We only provide a non-external variant for now.
+template <class SetType> class bf_iterator_storage {
+public:
+  SetType Visited;
+};
+
+// The visited state for the iteration is a simple set.
+template <typename NodeRef, unsigned SmallSize = 8>
+using bf_iterator_default_set = SmallPtrSet<NodeRef, SmallSize>;
+
+// Generic Breadth first search iterator.
+template <class GraphT,
+          class SetType =
+              bf_iterator_default_set<typename GraphTraits<GraphT>::NodeRef>,
+          class GT = GraphTraits<GraphT>>
+class bf_iterator
+    : public std::iterator<std::forward_iterator_tag, typename GT::NodeRef>,
+      public bf_iterator_storage<SetType> {
+  using super = std::iterator<std::forward_iterator_tag, typename GT::NodeRef>;
+
+  using NodeRef = typename GT::NodeRef;
+  using ChildItTy = typename GT::ChildIteratorType;
+
+  // First element is the node reference, second is the next child to visit.
+  using QueueElement = std::pair<NodeRef, Optional<ChildItTy>>;
+
+  // Visit queue - used to maintain BFS ordering.
+  // Optional<> because we need markers for levels.
+  std::queue<Optional<QueueElement>> VisitQueue;
+
+  // Current level.
+  unsigned Level;
+
+private:
+  inline bf_iterator(NodeRef Node) {
+    this->Visited.insert(Node);
+    Level = 0;
+
+    // Also, insert a dummy node as marker.
+    VisitQueue.push(QueueElement(Node, None));
+    VisitQueue.push(None);
+  }
+
+  inline bf_iterator() = default;
+
+  inline void toNext() {
+    Optional<QueueElement> Head = VisitQueue.front();
+    QueueElement H = Head.getValue();
+    NodeRef Node = H.first;
+    Optional<ChildItTy> &ChildIt = H.second;
+
+    if (!ChildIt)
+      ChildIt.emplace(GT::child_begin(Node));
+    while (*ChildIt != GT::child_end(Node)) {
+      NodeRef Next = *(*ChildIt)++;
+
+      // Enqueue the child only if it has not been visited before.
+      if (this->Visited.insert(Next).second)
+        VisitQueue.push(QueueElement(Next, None));
+    }
+    VisitQueue.pop();
+
+    // Go to the next element skipping markers if needed.
+    if (!VisitQueue.empty()) {
+      Head = VisitQueue.front();
+      if (Head != None)
+        return;
+      Level += 1;
+      VisitQueue.pop();
+
+      // Don't push another marker if this is the last
+      // element.
+      if (!VisitQueue.empty())
+        VisitQueue.push(None);
+    }
+  }
+
+public:
+  using pointer = typename super::pointer;
+
+  // Provide static begin and end methods as our public "constructors"
+  static bf_iterator begin(const GraphT &G) {
+    return bf_iterator(GT::getEntryNode(G));
+  }
+
+  static bf_iterator end(const GraphT &G) { return bf_iterator(); }
+
+  bool operator==(const bf_iterator &RHS) const {
+    return VisitQueue == RHS.VisitQueue;
+  }
+
+  bool operator!=(const bf_iterator &RHS) const { return !(*this == RHS); }
+
+  const NodeRef &operator*() const { return VisitQueue.front()->first; }
+
+  // This is a nonstandard operator-> that dereferences the pointer an extra
+  // time so that you can actually call methods on the node, because the
+  // contained type is a pointer.
+  NodeRef operator->() const { return **this; }
+
+  bf_iterator &operator++() { // Pre-increment
+    toNext();
+    return *this;
+  }
+
+  bf_iterator operator++(int) { // Post-increment
+    bf_iterator ItCopy = *this;
+    ++*this;
+    return ItCopy;
+  }
+
+  unsigned getLevel() const { return Level; }
+};
+
+// Provide global constructors that automatically figure out correct types.
+template <class T> bf_iterator<T> bf_begin(const T &G) {
+  return bf_iterator<T>::begin(G);
+}
+
+template <class T> bf_iterator<T> bf_end(const T &G) {
+  return bf_iterator<T>::end(G);
+}
+
+// Provide an accessor method to use them in range-based patterns.
+template <class T> iterator_range<bf_iterator<T>> breadth_first(const T &G) {
+  return make_range(bf_begin(G), bf_end(G));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_BREADTHFIRSTITERATOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/CachedHashString.h b/linux-x64/clang/include/llvm/ADT/CachedHashString.h
new file mode 100644
index 0000000..a56a621
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/CachedHashString.h
@@ -0,0 +1,184 @@
+//===- llvm/ADT/CachedHashString.h - Prehashed string/StringRef -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CachedHashString and CachedHashStringRef.  These are owning
+// and not-owning string types that store their hash in addition to their string
+// data.
+//
+// Unlike std::string, CachedHashString can be used in DenseSet/DenseMap
+// (because, unlike std::string, CachedHashString lets us have empty and
+// tombstone values).
+//
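+// For example, a sketch (Str stands for any StringRef already in scope):
+//
+//   DenseSet<CachedHashStringRef> Interned;
+//   CachedHashStringRef CHS(Str); // the hash is computed once, here
+//   Interned.insert(CHS);         // DenseSet reuses CHS.hash()
+//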
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_CACHED_HASH_STRING_H
+#define LLVM_ADT_CACHED_HASH_STRING_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// A container which contains a StringRef plus a precomputed hash.
+class CachedHashStringRef {
+  const char *P;
+  uint32_t Size;
+  uint32_t Hash;
+
+public:
+  // Explicit because hashing a string isn't free.
+  explicit CachedHashStringRef(StringRef S)
+      : CachedHashStringRef(S, DenseMapInfo<StringRef>::getHashValue(S)) {}
+
+  CachedHashStringRef(StringRef S, uint32_t Hash)
+      : P(S.data()), Size(S.size()), Hash(Hash) {
+    assert(S.size() <= std::numeric_limits<uint32_t>::max());
+  }
+
+  StringRef val() const { return StringRef(P, Size); }
+  uint32_t size() const { return Size; }
+  uint32_t hash() const { return Hash; }
+};
+
+template <> struct DenseMapInfo<CachedHashStringRef> {
+  static CachedHashStringRef getEmptyKey() {
+    return CachedHashStringRef(DenseMapInfo<StringRef>::getEmptyKey(), 0);
+  }
+  static CachedHashStringRef getTombstoneKey() {
+    return CachedHashStringRef(DenseMapInfo<StringRef>::getTombstoneKey(), 1);
+  }
+  static unsigned getHashValue(const CachedHashStringRef &S) {
+    assert(!isEqual(S, getEmptyKey()) && "Cannot hash the empty key!");
+    assert(!isEqual(S, getTombstoneKey()) && "Cannot hash the tombstone key!");
+    return S.hash();
+  }
+  static bool isEqual(const CachedHashStringRef &LHS,
+                      const CachedHashStringRef &RHS) {
+    return LHS.hash() == RHS.hash() &&
+           DenseMapInfo<StringRef>::isEqual(LHS.val(), RHS.val());
+  }
+};
+
+/// A container which contains a string, which it owns, plus a precomputed hash.
+///
+/// We do not null-terminate the string.
+class CachedHashString {
+  friend struct DenseMapInfo<CachedHashString>;
+
+  char *P;
+  uint32_t Size;
+  uint32_t Hash;
+
+  static char *getEmptyKeyPtr() { return DenseMapInfo<char *>::getEmptyKey(); }
+  static char *getTombstoneKeyPtr() {
+    return DenseMapInfo<char *>::getTombstoneKey();
+  }
+
+  bool isEmptyOrTombstone() const {
+    return P == getEmptyKeyPtr() || P == getTombstoneKeyPtr();
+  }
+
+  struct ConstructEmptyOrTombstoneTy {};
+
+  CachedHashString(ConstructEmptyOrTombstoneTy, char *EmptyOrTombstonePtr)
+      : P(EmptyOrTombstonePtr), Size(0), Hash(0) {
+    assert(isEmptyOrTombstone());
+  }
+
+  // TODO: Use small-string optimization to avoid allocating.
+
+public:
+  explicit CachedHashString(const char *S) : CachedHashString(StringRef(S)) {}
+
+  // Explicit because copying and hashing a string isn't free.
+  explicit CachedHashString(StringRef S)
+      : CachedHashString(S, DenseMapInfo<StringRef>::getHashValue(S)) {}
+
+  CachedHashString(StringRef S, uint32_t Hash)
+      : P(new char[S.size()]), Size(S.size()), Hash(Hash) {
+    memcpy(P, S.data(), S.size());
+  }
+
+  // Ideally this class would not be copyable.  But SetVector requires copyable
+  // keys, and we want this to be usable there.
+  CachedHashString(const CachedHashString &Other)
+      : Size(Other.Size), Hash(Other.Hash) {
+    if (Other.isEmptyOrTombstone()) {
+      P = Other.P;
+    } else {
+      P = new char[Size];
+      memcpy(P, Other.P, Size);
+    }
+  }
+
+  CachedHashString &operator=(CachedHashString Other) {
+    swap(*this, Other);
+    return *this;
+  }
+
+  CachedHashString(CachedHashString &&Other) noexcept
+      : P(Other.P), Size(Other.Size), Hash(Other.Hash) {
+    Other.P = getEmptyKeyPtr();
+  }
+
+  ~CachedHashString() {
+    if (!isEmptyOrTombstone())
+      delete[] P;
+  }
+
+  StringRef val() const { return StringRef(P, Size); }
+  uint32_t size() const { return Size; }
+  uint32_t hash() const { return Hash; }
+
+  operator StringRef() const { return val(); }
+  operator CachedHashStringRef() const {
+    return CachedHashStringRef(val(), Hash);
+  }
+
+  friend void swap(CachedHashString &LHS, CachedHashString &RHS) {
+    using std::swap;
+    swap(LHS.P, RHS.P);
+    swap(LHS.Size, RHS.Size);
+    swap(LHS.Hash, RHS.Hash);
+  }
+};
+
+template <> struct DenseMapInfo<CachedHashString> {
+  static CachedHashString getEmptyKey() {
+    return CachedHashString(CachedHashString::ConstructEmptyOrTombstoneTy(),
+                            CachedHashString::getEmptyKeyPtr());
+  }
+  static CachedHashString getTombstoneKey() {
+    return CachedHashString(CachedHashString::ConstructEmptyOrTombstoneTy(),
+                            CachedHashString::getTombstoneKeyPtr());
+  }
+  static unsigned getHashValue(const CachedHashString &S) {
+    assert(!isEqual(S, getEmptyKey()) && "Cannot hash the empty key!");
+    assert(!isEqual(S, getTombstoneKey()) && "Cannot hash the tombstone key!");
+    return S.hash();
+  }
+  static bool isEqual(const CachedHashString &LHS,
+                      const CachedHashString &RHS) {
+    if (LHS.hash() != RHS.hash())
+      return false;
+    if (LHS.P == CachedHashString::getEmptyKeyPtr())
+      return RHS.P == CachedHashString::getEmptyKeyPtr();
+    if (LHS.P == CachedHashString::getTombstoneKeyPtr())
+      return RHS.P == CachedHashString::getTombstoneKeyPtr();
+
+    // This is safe because if RHS.P is the empty or tombstone key, it will have
+    // length 0, so we'll never dereference its pointer.
+    return LHS.val() == RHS.val();
+  }
+};
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/DAGDeltaAlgorithm.h b/linux-x64/clang/include/llvm/ADT/DAGDeltaAlgorithm.h
new file mode 100644
index 0000000..41fdd43
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/DAGDeltaAlgorithm.h
@@ -0,0 +1,79 @@
+//===- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ------*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DAGDELTAALGORITHM_H
+#define LLVM_ADT_DAGDELTAALGORITHM_H
+
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+/// DAGDeltaAlgorithm - Implements a "delta debugging" algorithm for minimizing
+/// directed acyclic graphs using a predicate function.
+///
+/// The result of the algorithm is a subset of the input change set which is
+/// guaranteed to satisfy the predicate, assuming that the input set did. For
+/// well formed predicates, the result set is guaranteed to be such that
+/// removing any single element not required by the dependencies on the other
+/// elements would falsify the predicate.
+///
+/// The DAG should be used to represent dependencies in the changes which are
+/// likely to hold across the predicate function. That is, for a particular
+/// changeset S and predicate P:
+///
+///   P(S) => P(S union pred(S))
+///
+/// The minimization algorithm uses this dependency information to attempt to
+/// eagerly prune large subsets of changes. As with \see DeltaAlgorithm, the DAG
+/// is not required to satisfy this property, but the algorithm will run
+/// substantially fewer tests with appropriate dependencies. \see DeltaAlgorithm
+/// for more information on the properties which the predicate function itself
+/// should satisfy.
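+///
+/// A minimal subclass sketch (the concrete predicate is hypothetical):
+///
+/// \code
+///   struct MyDAGDelta : DAGDeltaAlgorithm {
+///     bool ExecuteOneTest(const changeset_ty &S) override {
+///       return S.count(3) != 0; // "interesting" iff change 3 is present
+///     }
+///   };
+/// \endcode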
+class DAGDeltaAlgorithm {
+  virtual void anchor();
+
+public:
+  using change_ty = unsigned;
+  using edge_ty = std::pair<change_ty, change_ty>;
+
+  // FIXME: Use a decent data structure.
+  using changeset_ty = std::set<change_ty>;
+  using changesetlist_ty = std::vector<changeset_ty>;
+
+public:
+  virtual ~DAGDeltaAlgorithm() = default;
+
+  /// Run - Minimize the DAG formed by the \p Changes vertices and the
+  /// \p Dependencies edges by executing \see ExecuteOneTest() on subsets of
+  /// changes and returning the smallest set which still satisfies the test
+  /// predicate and the input \p Dependencies.
+  ///
+  /// \param Changes The list of changes.
+  ///
+  /// \param Dependencies The list of dependencies amongst changes. For each
+  /// (x,y) in \p Dependencies, both x and y must be in \p Changes. The
+/// minimization algorithm guarantees that for each tested change set S,
+  /// \f$ x \in S \f$ implies \f$ y \in S \f$. It is an error to have cyclic
+  /// dependencies.
+  changeset_ty Run(const changeset_ty &Changes,
+                   const std::vector<edge_ty> &Dependencies);
+
+  /// UpdatedSearchState - Callback used when the search state changes.
+  virtual void UpdatedSearchState(const changeset_ty &Changes,
+                                  const changesetlist_ty &Sets,
+                                  const changeset_ty &Required) {}
+
+  /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
+  virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DAGDELTAALGORITHM_H
diff --git a/linux-x64/clang/include/llvm/ADT/DeltaAlgorithm.h b/linux-x64/clang/include/llvm/ADT/DeltaAlgorithm.h
new file mode 100644
index 0000000..6becb2a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/DeltaAlgorithm.h
@@ -0,0 +1,93 @@
+//===- DeltaAlgorithm.h - A Set Minimization Algorithm ---------*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DELTAALGORITHM_H
+#define LLVM_ADT_DELTAALGORITHM_H
+
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+/// DeltaAlgorithm - Implements the delta debugging algorithm (A. Zeller '99)
+/// for minimizing arbitrary sets using a predicate function.
+///
+/// The result of the algorithm is a subset of the input change set which is
+/// guaranteed to satisfy the predicate, assuming that the input set did. For
+/// well formed predicates, the result set is guaranteed to be such that
+/// removing any single element would falsify the predicate.
+///
+/// For best results the predicate function *should* (but need not) satisfy
+/// certain properties, in particular:
+///  (1) The predicate should return false on an empty set and true on the full
+///  set.
+///  (2) If the predicate returns true for a set of changes, it should return
+///  true for all supersets of that set.
+///
+/// It is not an error to provide a predicate that does not satisfy these
+/// requirements, and the algorithm will generally produce reasonable
+/// results. However, it may run substantially more tests than with a good
+/// predicate.
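+///
+/// A minimal subclass sketch (the concrete predicate is hypothetical):
+///
+/// \code
+///   struct MyDelta : DeltaAlgorithm {
+///     bool ExecuteOneTest(const changeset_ty &S) override {
+///       return S.count(42) != 0; // "interesting" while change 42 survives
+///     }
+///   };
+///   // MyDelta().Run({1, 2, 42}) then minimizes the set toward {42}.
+/// \endcode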
+class DeltaAlgorithm {
+public:
+  using change_ty = unsigned;
+  // FIXME: Use a decent data structure.
+  using changeset_ty = std::set<change_ty>;
+  using changesetlist_ty = std::vector<changeset_ty>;
+
+private:
+  /// Cache of failed test results. Successful test results are never cached
+  /// since we always reduce following a success.
+  std::set<changeset_ty> FailedTestsCache;
+
+  /// GetTestResult - Get the test result for the \p Changes from the
+  /// cache, executing the test if necessary.
+  ///
+  /// \param Changes - The change set to test.
+  /// \return - The test result.
+  bool GetTestResult(const changeset_ty &Changes);
+
+  /// Split - Partition a set of changes \p S into one or two subsets.
+  void Split(const changeset_ty &S, changesetlist_ty &Res);
+
+  /// Delta - Minimize a set of \p Changes which has been partitioned into
+  /// smaller sets, by attempting to remove individual subsets.
+  changeset_ty Delta(const changeset_ty &Changes,
+                     const changesetlist_ty &Sets);
+
+  /// Search - Search for a subset (or subsets) in \p Sets which can be
+  /// removed from \p Changes while still satisfying the predicate.
+  ///
+  /// \param Res - On success, a subset of Changes which satisfies the
+  /// predicate.
+  /// \return - True on success.
+  bool Search(const changeset_ty &Changes, const changesetlist_ty &Sets,
+              changeset_ty &Res);
+
+protected:
+  /// UpdatedSearchState - Callback used when the search state changes.
+  virtual void UpdatedSearchState(const changeset_ty &Changes,
+                                  const changesetlist_ty &Sets) {}
+
+  /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
+  virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
+
+  DeltaAlgorithm& operator=(const DeltaAlgorithm&) = default;
+
+public:
+  virtual ~DeltaAlgorithm();
+
+  /// Run - Minimize the set \p Changes by executing \see ExecuteOneTest() on
+  /// subsets of changes and returning the smallest set which still satisfies
+  /// the test predicate.
+  changeset_ty Run(const changeset_ty &Changes);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DELTAALGORITHM_H
diff --git a/linux-x64/clang/include/llvm/ADT/DenseMap.h b/linux-x64/clang/include/llvm/ADT/DenseMap.h
new file mode 100644
index 0000000..ba60b79
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/DenseMap.h
@@ -0,0 +1,1224 @@
+//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DenseMap class.
+//
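+// A usage sketch:
+//
+//   DenseMap<unsigned, int> M;
+//   M[1] = 10;                     // insert or update through operator[]
+//   auto R = M.try_emplace(2, 20); // R.second is true iff a new entry was made
+//   int V = M.lookup(3);           // 0: lookup default-constructs on a miss
+//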
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSEMAP_H
+#define LLVM_ADT_DENSEMAP_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/EpochTracker.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/ReverseIteration.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <iterator>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+namespace detail {
+
+// We extend a pair to allow users to override the bucket type with their own
+// implementation without requiring two members.
+template <typename KeyT, typename ValueT>
+struct DenseMapPair : public std::pair<KeyT, ValueT> {
+  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
+  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
+  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
+  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
+};
+
+} // end namespace detail
+
+template <
+    typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo<KeyT>,
+    typename Bucket = detail::DenseMapPair<KeyT, ValueT>, bool IsConst = false>
+class DenseMapIterator;
+
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+          typename BucketT>
+class DenseMapBase : public DebugEpochBase {
+  template <typename T>
+  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
+
+public:
+  using size_type = unsigned;
+  using key_type = KeyT;
+  using mapped_type = ValueT;
+  using value_type = BucketT;
+
+  using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
+  using const_iterator =
+      DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
+
+  inline iterator begin() {
+    // When the map is empty, avoid the overhead of advancing/retreating past
+    // empty buckets.
+    if (empty())
+      return end();
+    if (shouldReverseIterate<KeyT>())
+      return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
+    return makeIterator(getBuckets(), getBucketsEnd(), *this);
+  }
+  inline iterator end() {
+    return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
+  }
+  inline const_iterator begin() const {
+    if (empty())
+      return end();
+    if (shouldReverseIterate<KeyT>())
+      return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
+    return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
+  }
+  inline const_iterator end() const {
+    return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
+  }
+
+  LLVM_NODISCARD bool empty() const {
+    return getNumEntries() == 0;
+  }
+  unsigned size() const { return getNumEntries(); }
+
+  /// Grow the densemap so that it can contain at least \p NumEntries items
+  /// before resizing again.
+  void reserve(size_type NumEntries) {
+    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
+    incrementEpoch();
+    if (NumBuckets > getNumBuckets())
+      grow(NumBuckets);
+  }
+
+  void clear() {
+    incrementEpoch();
+    if (getNumEntries() == 0 && getNumTombstones() == 0) return;
+
+    // If the capacity of the array is huge, and the # elements used is small,
+    // shrink the array.
+    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
+      shrink_and_clear();
+      return;
+    }
+
+    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value) {
+      // Use a simpler loop when these are trivial types.
+      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
+        P->getFirst() = EmptyKey;
+    } else {
+      unsigned NumEntries = getNumEntries();
+      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
+          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
+            P->getSecond().~ValueT();
+            --NumEntries;
+          }
+          P->getFirst() = EmptyKey;
+        }
+      }
+      assert(NumEntries == 0 && "Node count imbalance!");
+    }
+    setNumEntries(0);
+    setNumTombstones(0);
+  }
+
+  /// Return 1 if the specified key is in the map, 0 otherwise.
+  size_type count(const_arg_type_t<KeyT> Val) const {
+    const BucketT *TheBucket;
+    return LookupBucketFor(Val, TheBucket) ? 1 : 0;
+  }
+
+  iterator find(const_arg_type_t<KeyT> Val) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Val, TheBucket))
+      return makeIterator(TheBucket, getBucketsEnd(), *this, true);
+    return end();
+  }
+  const_iterator find(const_arg_type_t<KeyT> Val) const {
+    const BucketT *TheBucket;
+    if (LookupBucketFor(Val, TheBucket))
+      return makeConstIterator(TheBucket, getBucketsEnd(), *this, true);
+    return end();
+  }
+
+  /// Alternate version of find() which allows a different, and possibly
+  /// less expensive, key type.
+  /// The DenseMapInfo is responsible for supplying methods
+  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+  /// type used.
+  template<class LookupKeyT>
+  iterator find_as(const LookupKeyT &Val) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Val, TheBucket))
+      return makeIterator(TheBucket, getBucketsEnd(), *this, true);
+    return end();
+  }
+  template<class LookupKeyT>
+  const_iterator find_as(const LookupKeyT &Val) const {
+    const BucketT *TheBucket;
+    if (LookupBucketFor(Val, TheBucket))
+      return makeConstIterator(TheBucket, getBucketsEnd(), *this, true);
+    return end();
+  }
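+
+  // A usage sketch: given a DenseMapInfo<KeyT> specialization that also
+  // provides getHashValue(const LookupKeyT &) and
+  // isEqual(const LookupKeyT &, const KeyT &), a caller can probe the map
+  // without materializing a full KeyT (CheapKey and M are placeholders):
+  //
+  //   auto It = M.find_as(CheapKey);
+  //   if (It != M.end())
+  //     ... // use It->second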
+
+  /// lookup - Return the entry for the specified key, or a default
+  /// constructed value if no such entry exists.
+  ValueT lookup(const_arg_type_t<KeyT> Val) const {
+    const BucketT *TheBucket;
+    if (LookupBucketFor(Val, TheBucket))
+      return TheBucket->getSecond();
+    return ValueT();
+  }
+
+  // Inserts key,value pair into the map if the key isn't already in the map.
+  // If the key is already in the map, it returns false and doesn't update the
+  // value.
+  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
+    return try_emplace(KV.first, KV.second);
+  }
+
+  // Inserts key,value pair into the map if the key isn't already in the map.
+  // If the key is already in the map, it returns false and doesn't update the
+  // value.
+  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
+    return try_emplace(std::move(KV.first), std::move(KV.second));
+  }
+
+  // Inserts key,value pair into the map if the key isn't already in the map.
+  // The value is constructed in-place if the key is not in the map, otherwise
+  // it is not moved.
+  template <typename... Ts>
+  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return std::make_pair(
+               makeIterator(TheBucket, getBucketsEnd(), *this, true),
+               false); // Already in map.
+
+    // Otherwise, insert the new element.
+    TheBucket =
+        InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
+    return std::make_pair(
+             makeIterator(TheBucket, getBucketsEnd(), *this, true),
+             true);
+  }
+
+  // Inserts key,value pair into the map if the key isn't already in the map.
+  // The value is constructed in-place if the key is not in the map, otherwise
+  // it is not moved.
+  template <typename... Ts>
+  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return std::make_pair(
+               makeIterator(TheBucket, getBucketsEnd(), *this, true),
+               false); // Already in map.
+
+    // Otherwise, insert the new element.
+    TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
+    return std::make_pair(
+             makeIterator(TheBucket, getBucketsEnd(), *this, true),
+             true);
+  }
+
+  /// Alternate version of insert() which allows a different, and possibly
+  /// less expensive, key type.
+  /// The DenseMapInfo is responsible for supplying methods
+  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+  /// type used.
+  template <typename LookupKeyT>
+  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
+                                      const LookupKeyT &Val) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Val, TheBucket))
+      return std::make_pair(
+               makeIterator(TheBucket, getBucketsEnd(), *this, true),
+               false); // Already in map.
+
+    // Otherwise, insert the new element.
+    TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
+                                           std::move(KV.second), Val);
+    return std::make_pair(
+             makeIterator(TheBucket, getBucketsEnd(), *this, true),
+             true);
+  }
+
+  /// insert - Range insertion of pairs.
+  template<typename InputIt>
+  void insert(InputIt I, InputIt E) {
+    for (; I != E; ++I)
+      insert(*I);
+  }
+
+  bool erase(const KeyT &Val) {
+    BucketT *TheBucket;
+    if (!LookupBucketFor(Val, TheBucket))
+      return false; // not in map.
+
+    TheBucket->getSecond().~ValueT();
+    TheBucket->getFirst() = getTombstoneKey();
+    decrementNumEntries();
+    incrementNumTombstones();
+    return true;
+  }
+  void erase(iterator I) {
+    BucketT *TheBucket = &*I;
+    TheBucket->getSecond().~ValueT();
+    TheBucket->getFirst() = getTombstoneKey();
+    decrementNumEntries();
+    incrementNumTombstones();
+  }
+
+  value_type& FindAndConstruct(const KeyT &Key) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return *TheBucket;
+
+    return *InsertIntoBucket(TheBucket, Key);
+  }
+
+  ValueT &operator[](const KeyT &Key) {
+    return FindAndConstruct(Key).second;
+  }
+
+  value_type& FindAndConstruct(KeyT &&Key) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return *TheBucket;
+
+    return *InsertIntoBucket(TheBucket, std::move(Key));
+  }
+
+  ValueT &operator[](KeyT &&Key) {
+    return FindAndConstruct(std::move(Key)).second;
+  }
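+
+  // Sketch of operator[] semantics (types hypothetical): a missing key is
+  // inserted with a value-initialized mapped value, as FindAndConstruct shows.
+  //
+  //   DenseMap<int, unsigned> Counts;
+  //   ++Counts[42];             // inserts {42, 0}, then increments to 1
+  //   unsigned N = Counts[7];   // inserts {7, 0}; N == 0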
+
+  /// isPointerIntoBucketsArray - Return true if the specified pointer points
+  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
+  /// value in the DenseMap).
+  bool isPointerIntoBucketsArray(const void *Ptr) const {
+    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
+  }
+
+  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
+  /// array.  In conjunction with the previous method, this can be used to
+  /// determine whether an insertion caused the DenseMap to reallocate.
+  const void *getPointerIntoBucketsArray() const { return getBuckets(); }
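+
+  // Sketch of detecting reallocation across an insertion (illustrative):
+  //
+  //   const void *Before = M.getPointerIntoBucketsArray();
+  //   M.try_emplace(K, V);
+  //   bool Reallocated = Before != M.getPointerIntoBucketsArray();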
+
+protected:
+  DenseMapBase() = default;
+
+  void destroyAll() {
+    if (getNumBuckets() == 0) // Nothing to do.
+      return;
+
+    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
+        P->getSecond().~ValueT();
+      P->getFirst().~KeyT();
+    }
+  }
+
+  void initEmpty() {
+    setNumEntries(0);
+    setNumTombstones(0);
+
+    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
+           "# initial buckets must be a power of two!");
+    const KeyT EmptyKey = getEmptyKey();
+    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
+      ::new (&B->getFirst()) KeyT(EmptyKey);
+  }
+
+  /// Returns the number of buckets to allocate to ensure that the DenseMap can
+  /// accommodate \p NumEntries without needing to grow().
+  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
+    // Ensure that "NumEntries * 4 < NumBuckets * 3"
+    if (NumEntries == 0)
+      return 0;
+    // +1 is required because the inequality above is strict.
+    return NextPowerOf2(NumEntries * 4 / 3 + 1);
+  }
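+
+  // Worked example for the sizing above (illustrative): NumEntries == 48
+  // gives 48 * 4 / 3 + 1 == 65, and NextPowerOf2(65) == 128, so reserving 48
+  // entries allocates 128 buckets (48 * 4 == 192 < 128 * 3 == 384 holds).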
+
+  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
+    initEmpty();
+
+    // Insert all the old elements.
+    const KeyT EmptyKey = getEmptyKey();
+    const KeyT TombstoneKey = getTombstoneKey();
+    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
+      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
+          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
+        // Insert the key/value into the new table.
+        BucketT *DestBucket;
+        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
+        (void)FoundVal; // silence warning.
+        assert(!FoundVal && "Key already in new map?");
+        DestBucket->getFirst() = std::move(B->getFirst());
+        ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
+        incrementNumEntries();
+
+        // Free the value.
+        B->getSecond().~ValueT();
+      }
+      B->getFirst().~KeyT();
+    }
+  }
+
+  template <typename OtherBaseT>
+  void copyFrom(
+      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
+    assert(&other != this);
+    assert(getNumBuckets() == other.getNumBuckets());
+
+    setNumEntries(other.getNumEntries());
+    setNumTombstones(other.getNumTombstones());
+
+    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
+      memcpy(getBuckets(), other.getBuckets(),
+             getNumBuckets() * sizeof(BucketT));
+    else
+      for (size_t i = 0; i < getNumBuckets(); ++i) {
+        ::new (&getBuckets()[i].getFirst())
+            KeyT(other.getBuckets()[i].getFirst());
+        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
+            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
+          ::new (&getBuckets()[i].getSecond())
+              ValueT(other.getBuckets()[i].getSecond());
+      }
+  }
+
+  static unsigned getHashValue(const KeyT &Val) {
+    return KeyInfoT::getHashValue(Val);
+  }
+
+  template<typename LookupKeyT>
+  static unsigned getHashValue(const LookupKeyT &Val) {
+    return KeyInfoT::getHashValue(Val);
+  }
+
+  static const KeyT getEmptyKey() {
+    static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
+                  "Must pass the derived type to this template!");
+    return KeyInfoT::getEmptyKey();
+  }
+
+  static const KeyT getTombstoneKey() {
+    return KeyInfoT::getTombstoneKey();
+  }
+
+private:
+  iterator makeIterator(BucketT *P, BucketT *E,
+                        DebugEpochBase &Epoch,
+                        bool NoAdvance=false) {
+    if (shouldReverseIterate<KeyT>()) {
+      BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
+      return iterator(B, E, Epoch, NoAdvance);
+    }
+    return iterator(P, E, Epoch, NoAdvance);
+  }
+
+  const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
+                                   const DebugEpochBase &Epoch,
+                                   const bool NoAdvance=false) const {
+    if (shouldReverseIterate<KeyT>()) {
+      const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
+      return const_iterator(B, E, Epoch, NoAdvance);
+    }
+    return const_iterator(P, E, Epoch, NoAdvance);
+  }
+
+  unsigned getNumEntries() const {
+    return static_cast<const DerivedT *>(this)->getNumEntries();
+  }
+
+  void setNumEntries(unsigned Num) {
+    static_cast<DerivedT *>(this)->setNumEntries(Num);
+  }
+
+  void incrementNumEntries() {
+    setNumEntries(getNumEntries() + 1);
+  }
+
+  void decrementNumEntries() {
+    setNumEntries(getNumEntries() - 1);
+  }
+
+  unsigned getNumTombstones() const {
+    return static_cast<const DerivedT *>(this)->getNumTombstones();
+  }
+
+  void setNumTombstones(unsigned Num) {
+    static_cast<DerivedT *>(this)->setNumTombstones(Num);
+  }
+
+  void incrementNumTombstones() {
+    setNumTombstones(getNumTombstones() + 1);
+  }
+
+  void decrementNumTombstones() {
+    setNumTombstones(getNumTombstones() - 1);
+  }
+
+  const BucketT *getBuckets() const {
+    return static_cast<const DerivedT *>(this)->getBuckets();
+  }
+
+  BucketT *getBuckets() {
+    return static_cast<DerivedT *>(this)->getBuckets();
+  }
+
+  unsigned getNumBuckets() const {
+    return static_cast<const DerivedT *>(this)->getNumBuckets();
+  }
+
+  BucketT *getBucketsEnd() {
+    return getBuckets() + getNumBuckets();
+  }
+
+  const BucketT *getBucketsEnd() const {
+    return getBuckets() + getNumBuckets();
+  }
+
+  void grow(unsigned AtLeast) {
+    static_cast<DerivedT *>(this)->grow(AtLeast);
+  }
+
+  void shrink_and_clear() {
+    static_cast<DerivedT *>(this)->shrink_and_clear();
+  }
+
+  template <typename KeyArg, typename... ValueArgs>
+  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
+                            ValueArgs &&... Values) {
+    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
+
+    TheBucket->getFirst() = std::forward<KeyArg>(Key);
+    ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
+    return TheBucket;
+  }
+
+  template <typename LookupKeyT>
+  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
+                                      ValueT &&Value, LookupKeyT &Lookup) {
+    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
+
+    TheBucket->getFirst() = std::move(Key);
+    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
+    return TheBucket;
+  }
+
+  template <typename LookupKeyT>
+  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
+                                BucketT *TheBucket) {
+    incrementEpoch();
+
+    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
+    // the buckets are empty (meaning that many are filled with tombstones),
+    // grow the table.
+    //
+    // The latter case is tricky.  For example, if we had one empty bucket with
+    // tons of tombstones, failing lookups (e.g. for insertion) would have to
+    // probe almost the entire table until they found the empty bucket.  If the
+    // table were completely filled with tombstones, no lookup would ever
+    // succeed, causing infinite loops in lookup.
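+    //
+    // Concretely (hypothetical numbers): with 64 buckets, the map grows to 128
+    // when the 48th entry is added (48 * 4 >= 64 * 3), and it rehashes at the
+    // same size once the number of truly empty buckets drops to 64 / 8 or
+    // fewer, purging accumulated tombstones.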
+    unsigned NewNumEntries = getNumEntries() + 1;
+    unsigned NumBuckets = getNumBuckets();
+    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
+      this->grow(NumBuckets * 2);
+      LookupBucketFor(Lookup, TheBucket);
+      NumBuckets = getNumBuckets();
+    } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
+                             NumBuckets/8)) {
+      this->grow(NumBuckets);
+      LookupBucketFor(Lookup, TheBucket);
+    }
+    assert(TheBucket);
+
+    // Only update the state after we've grown our bucket space appropriately
+    // so that when growing buckets we have a self-consistent entry count.
+    incrementNumEntries();
+
+    // If we are writing over a tombstone, remember this.
+    const KeyT EmptyKey = getEmptyKey();
+    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
+      decrementNumTombstones();
+
+    return TheBucket;
+  }
+
+  /// LookupBucketFor - Look up the appropriate bucket for Val, returning it in
+  /// FoundBucket.  If the bucket contains the key and a value, this returns
+  /// true; otherwise it returns false and sets FoundBucket to a bucket with an
+  /// empty marker or a tombstone.
+  template<typename LookupKeyT>
+  bool LookupBucketFor(const LookupKeyT &Val,
+                       const BucketT *&FoundBucket) const {
+    const BucketT *BucketsPtr = getBuckets();
+    const unsigned NumBuckets = getNumBuckets();
+
+    if (NumBuckets == 0) {
+      FoundBucket = nullptr;
+      return false;
+    }
+
+    // FoundTombstone - Keep track of whether we find a tombstone while probing.
+    const BucketT *FoundTombstone = nullptr;
+    const KeyT EmptyKey = getEmptyKey();
+    const KeyT TombstoneKey = getTombstoneKey();
+    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
+           !KeyInfoT::isEqual(Val, TombstoneKey) &&
+           "Empty/Tombstone value shouldn't be inserted into map!");
+
+    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
+    unsigned ProbeAmt = 1;
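+    // Probe sequence sketch: starting from the hash bucket H, successive
+    // probes visit H, H+1, H+3, H+6, ... (triangular offsets, mod NumBuckets).
+    // E.g. with NumBuckets == 8 and H == 5 (hypothetical numbers), the order
+    // is 5, 6, 0, 3, 7, 4, 2, 1 -- every bucket is reached because NumBuckets
+    // is a power of two.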
+    while (true) {
+      const BucketT *ThisBucket = BucketsPtr + BucketNo;
+      // Found Val's bucket?  If so, return it.
+      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
+        FoundBucket = ThisBucket;
+        return true;
+      }
+
+      // If we found an empty bucket, the key doesn't exist in the map.
+      // Return that bucket so the caller can insert into it.
+      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
+        // If we've already seen a tombstone while probing, fill it in instead
+        // of the empty bucket we eventually probed to.
+        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
+        return false;
+      }
+
+      // If this is a tombstone, remember it.  If Val ends up not in the map,
+      // we prefer to return it rather than something that would require more
+      // probing.
+      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
+          !FoundTombstone)
+        FoundTombstone = ThisBucket;  // Remember the first tombstone found.
+
+      // Otherwise, it's a hash collision or a tombstone; continue quadratic
+      // probing.
+      BucketNo += ProbeAmt++;
+      BucketNo &= (NumBuckets-1);
+    }
+  }
+
+  template <typename LookupKeyT>
+  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
+    const BucketT *ConstFoundBucket;
+    bool Result = const_cast<const DenseMapBase *>(this)
+      ->LookupBucketFor(Val, ConstFoundBucket);
+    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
+    return Result;
+  }
+
+public:
+  /// Return the approximate size (in bytes) of the actual map.
+  /// This is just the raw memory used by DenseMap.
+  /// If entries are pointers to objects, the sizes of the referenced objects
+  /// are not included.
+  size_t getMemorySize() const {
+    return getNumBuckets() * sizeof(BucketT);
+  }
+};
+
+template <typename KeyT, typename ValueT,
+          typename KeyInfoT = DenseMapInfo<KeyT>,
+          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
+class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
+                                     KeyT, ValueT, KeyInfoT, BucketT> {
+  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+  // Lift some types from the dependent base class into this class for
+  // simplicity of referring to them.
+  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+  BucketT *Buckets;
+  unsigned NumEntries;
+  unsigned NumTombstones;
+  unsigned NumBuckets;
+
+public:
+  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
+  /// this number of elements can be inserted into the map without grow().
+  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
+
+  DenseMap(const DenseMap &other) : BaseT() {
+    init(0);
+    copyFrom(other);
+  }
+
+  DenseMap(DenseMap &&other) : BaseT() {
+    init(0);
+    swap(other);
+  }
+
+  template<typename InputIt>
+  DenseMap(const InputIt &I, const InputIt &E) {
+    init(std::distance(I, E));
+    this->insert(I, E);
+  }
+
+  ~DenseMap() {
+    this->destroyAll();
+    operator delete(Buckets);
+  }
+
+  void swap(DenseMap& RHS) {
+    this->incrementEpoch();
+    RHS.incrementEpoch();
+    std::swap(Buckets, RHS.Buckets);
+    std::swap(NumEntries, RHS.NumEntries);
+    std::swap(NumTombstones, RHS.NumTombstones);
+    std::swap(NumBuckets, RHS.NumBuckets);
+  }
+
+  DenseMap& operator=(const DenseMap& other) {
+    if (&other != this)
+      copyFrom(other);
+    return *this;
+  }
+
+  DenseMap& operator=(DenseMap &&other) {
+    this->destroyAll();
+    operator delete(Buckets);
+    init(0);
+    swap(other);
+    return *this;
+  }
+
+  void copyFrom(const DenseMap& other) {
+    this->destroyAll();
+    operator delete(Buckets);
+    if (allocateBuckets(other.NumBuckets)) {
+      this->BaseT::copyFrom(other);
+    } else {
+      NumEntries = 0;
+      NumTombstones = 0;
+    }
+  }
+
+  void init(unsigned InitNumEntries) {
+    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
+    if (allocateBuckets(InitBuckets)) {
+      this->BaseT::initEmpty();
+    } else {
+      NumEntries = 0;
+      NumTombstones = 0;
+    }
+  }
+
+  void grow(unsigned AtLeast) {
+    unsigned OldNumBuckets = NumBuckets;
+    BucketT *OldBuckets = Buckets;
+
+    allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
+    assert(Buckets);
+    if (!OldBuckets) {
+      this->BaseT::initEmpty();
+      return;
+    }
+
+    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
+
+    // Free the old table.
+    operator delete(OldBuckets);
+  }
+
+  void shrink_and_clear() {
+    unsigned OldNumEntries = NumEntries;
+    this->destroyAll();
+
+    // Reduce the number of buckets.
+    unsigned NewNumBuckets = 0;
+    if (OldNumEntries)
+      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
+    if (NewNumBuckets == NumBuckets) {
+      this->BaseT::initEmpty();
+      return;
+    }
+
+    operator delete(Buckets);
+    init(NewNumBuckets);
+  }
+
+private:
+  unsigned getNumEntries() const {
+    return NumEntries;
+  }
+
+  void setNumEntries(unsigned Num) {
+    NumEntries = Num;
+  }
+
+  unsigned getNumTombstones() const {
+    return NumTombstones;
+  }
+
+  void setNumTombstones(unsigned Num) {
+    NumTombstones = Num;
+  }
+
+  BucketT *getBuckets() const {
+    return Buckets;
+  }
+
+  unsigned getNumBuckets() const {
+    return NumBuckets;
+  }
+
+  bool allocateBuckets(unsigned Num) {
+    NumBuckets = Num;
+    if (NumBuckets == 0) {
+      Buckets = nullptr;
+      return false;
+    }
+
+    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
+    return true;
+  }
+};
+
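+// Minimal usage sketch for DenseMap (illustrative values):
+//
+//   llvm::DenseMap<unsigned, const char *> M;
+//   M[1] = "one";                 // insert via operator[]
+//   M.try_emplace(2, "two");      // insert without overwriting
+//   for (const auto &KV : M)      // iteration order is unspecified
+//     (void)KV.first;
+//   M.erase(1u);
+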
+template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
+          typename KeyInfoT = DenseMapInfo<KeyT>,
+          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
+class SmallDenseMap
+    : public DenseMapBase<
+          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
+          ValueT, KeyInfoT, BucketT> {
+  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+  // Lift some types from the dependent base class into this class for
+  // simplicity of referring to them.
+  using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+  static_assert(isPowerOf2_64(InlineBuckets),
+                "InlineBuckets must be a power of 2.");
+
+  unsigned Small : 1;
+  unsigned NumEntries : 31;
+  unsigned NumTombstones;
+
+  struct LargeRep {
+    BucketT *Buckets;
+    unsigned NumBuckets;
+  };
+
+  /// A "union" of an inline bucket array and the struct representing
+  /// a large bucket. This union will be discriminated by the 'Small' bit.
+  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
+
+public:
+  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
+    init(NumInitBuckets);
+  }
+
+  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
+    init(0);
+    copyFrom(other);
+  }
+
+  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
+    init(0);
+    swap(other);
+  }
+
+  template<typename InputIt>
+  SmallDenseMap(const InputIt &I, const InputIt &E) {
+    init(NextPowerOf2(std::distance(I, E)));
+    this->insert(I, E);
+  }
+
+  ~SmallDenseMap() {
+    this->destroyAll();
+    deallocateBuckets();
+  }
+
+  void swap(SmallDenseMap& RHS) {
+    unsigned TmpNumEntries = RHS.NumEntries;
+    RHS.NumEntries = NumEntries;
+    NumEntries = TmpNumEntries;
+    std::swap(NumTombstones, RHS.NumTombstones);
+
+    const KeyT EmptyKey = this->getEmptyKey();
+    const KeyT TombstoneKey = this->getTombstoneKey();
+    if (Small && RHS.Small) {
+      // If we're swapping inline bucket arrays, we have to cope with some of
+      // the tricky bits of DenseMap's storage system: the buckets are not
+      // fully initialized. Thus we swap every key, but we may have
+      // a one-directional move of the value.
+      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+        BucketT *LHSB = &getInlineBuckets()[i],
+                *RHSB = &RHS.getInlineBuckets()[i];
+        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
+                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
+        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
+                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
+        if (hasLHSValue && hasRHSValue) {
+          // Swap together if we can...
+          std::swap(*LHSB, *RHSB);
+          continue;
+        }
+        // Swap separately and handle any asymmetry.
+        std::swap(LHSB->getFirst(), RHSB->getFirst());
+        if (hasLHSValue) {
+          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
+          LHSB->getSecond().~ValueT();
+        } else if (hasRHSValue) {
+          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
+          RHSB->getSecond().~ValueT();
+        }
+      }
+      return;
+    }
+    if (!Small && !RHS.Small) {
+      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
+      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
+      return;
+    }
+
+    SmallDenseMap &SmallSide = Small ? *this : RHS;
+    SmallDenseMap &LargeSide = Small ? RHS : *this;
+
+    // First stash the large side's rep and move the small side across.
+    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
+    LargeSide.getLargeRep()->~LargeRep();
+    LargeSide.Small = true;
+    // This is similar to the standard move-from-old-buckets, but the bucket
+    // count hasn't actually rotated in this case. So we have to carefully
+    // move construct the keys and values into their new locations, but there
+    // is no need to re-hash things.
+    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
+              *OldB = &SmallSide.getInlineBuckets()[i];
+      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
+      OldB->getFirst().~KeyT();
+      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
+          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
+        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
+        OldB->getSecond().~ValueT();
+      }
+    }
+
+    // The hard part of moving the small buckets across is done; just move
+    // the TmpRep into its new home.
+    SmallSide.Small = false;
+    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
+  }
+
+  SmallDenseMap& operator=(const SmallDenseMap& other) {
+    if (&other != this)
+      copyFrom(other);
+    return *this;
+  }
+
+  SmallDenseMap& operator=(SmallDenseMap &&other) {
+    this->destroyAll();
+    deallocateBuckets();
+    init(0);
+    swap(other);
+    return *this;
+  }
+
+  void copyFrom(const SmallDenseMap& other) {
+    this->destroyAll();
+    deallocateBuckets();
+    Small = true;
+    if (other.getNumBuckets() > InlineBuckets) {
+      Small = false;
+      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
+    }
+    this->BaseT::copyFrom(other);
+  }
+
+  void init(unsigned InitBuckets) {
+    Small = true;
+    if (InitBuckets > InlineBuckets) {
+      Small = false;
+      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
+    }
+    this->BaseT::initEmpty();
+  }
+
+  void grow(unsigned AtLeast) {
+    if (AtLeast >= InlineBuckets)
+      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
+
+    if (Small) {
+      if (AtLeast < InlineBuckets)
+        return; // Nothing to do.
+
+      // First move the inline buckets into a temporary storage.
+      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
+      BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
+      BucketT *TmpEnd = TmpBegin;
+
+      // Loop over the buckets, moving non-empty, non-tombstones into the
+      // temporary storage. Have the loop move the TmpEnd forward as it goes.
+      const KeyT EmptyKey = this->getEmptyKey();
+      const KeyT TombstoneKey = this->getTombstoneKey();
+      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
+        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
+          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
+                 "Too many inline buckets!");
+          ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
+          ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
+          ++TmpEnd;
+          P->getSecond().~ValueT();
+        }
+        P->getFirst().~KeyT();
+      }
+
+      // Now make this map use the large rep, and move all the entries back
+      // into it.
+      Small = false;
+      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+      this->moveFromOldBuckets(TmpBegin, TmpEnd);
+      return;
+    }
+
+    LargeRep OldRep = std::move(*getLargeRep());
+    getLargeRep()->~LargeRep();
+    if (AtLeast <= InlineBuckets) {
+      Small = true;
+    } else {
+      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+    }
+
+    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
+
+    // Free the old table.
+    operator delete(OldRep.Buckets);
+  }
+
+  void shrink_and_clear() {
+    unsigned OldSize = this->size();
+    this->destroyAll();
+
+    // Reduce the number of buckets.
+    unsigned NewNumBuckets = 0;
+    if (OldSize) {
+      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
+      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
+        NewNumBuckets = 64;
+    }
+    if ((Small && NewNumBuckets <= InlineBuckets) ||
+        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
+      this->BaseT::initEmpty();
+      return;
+    }
+
+    deallocateBuckets();
+    init(NewNumBuckets);
+  }
+
+private:
+  unsigned getNumEntries() const {
+    return NumEntries;
+  }
+
+  void setNumEntries(unsigned Num) {
+    // NumEntries is hardcoded to be 31 bits wide.
+    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
+    NumEntries = Num;
+  }
+
+  unsigned getNumTombstones() const {
+    return NumTombstones;
+  }
+
+  void setNumTombstones(unsigned Num) {
+    NumTombstones = Num;
+  }
+
+  const BucketT *getInlineBuckets() const {
+    assert(Small);
+    // Note that this cast does not violate aliasing rules as we assert that
+    // the memory's dynamic type is the small, inline bucket buffer, and the
+    // 'storage.buffer' static type is 'char *'.
+    return reinterpret_cast<const BucketT *>(storage.buffer);
+  }
+
+  BucketT *getInlineBuckets() {
+    return const_cast<BucketT *>(
+      const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
+  }
+
+  const LargeRep *getLargeRep() const {
+    assert(!Small);
+    // Note, same rule about aliasing as with getInlineBuckets.
+    return reinterpret_cast<const LargeRep *>(storage.buffer);
+  }
+
+  LargeRep *getLargeRep() {
+    return const_cast<LargeRep *>(
+      const_cast<const SmallDenseMap *>(this)->getLargeRep());
+  }
+
+  const BucketT *getBuckets() const {
+    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
+  }
+
+  BucketT *getBuckets() {
+    return const_cast<BucketT *>(
+      const_cast<const SmallDenseMap *>(this)->getBuckets());
+  }
+
+  unsigned getNumBuckets() const {
+    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
+  }
+
+  void deallocateBuckets() {
+    if (Small)
+      return;
+
+    operator delete(getLargeRep()->Buckets);
+    getLargeRep()->~LargeRep();
+  }
+
+  LargeRep allocateBuckets(unsigned Num) {
+    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
+    LargeRep Rep = {
+      static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
+    };
+    return Rep;
+  }
+};
+
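+// SmallDenseMap stores up to InlineBuckets buckets in-object and only spills
+// to the heap when it grows past them. A sketch (values illustrative):
+//
+//   llvm::SmallDenseMap<unsigned, unsigned, 8> M; // 8 inline buckets
+//   for (unsigned i = 0; i != 5; ++i)
+//     M[i] = i * i;    // five entries: still inline (load factor < 3/4)
+//   M[5] = 25;         // sixth entry: 6 * 4 >= 8 * 3, spills to the heap
+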
+template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
+          bool IsConst>
+class DenseMapIterator : DebugEpochBase::HandleBase {
+  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
+  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
+
+  using ConstIterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
+
+public:
+  using difference_type = ptrdiff_t;
+  using value_type =
+      typename std::conditional<IsConst, const Bucket, Bucket>::type;
+  using pointer = value_type *;
+  using reference = value_type &;
+  using iterator_category = std::forward_iterator_tag;
+
+private:
+  pointer Ptr = nullptr;
+  pointer End = nullptr;
+
+public:
+  DenseMapIterator() = default;
+
+  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
+                   bool NoAdvance = false)
+      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
+    assert(isHandleInSync() && "invalid construction!");
+
+    if (NoAdvance) return;
+    if (shouldReverseIterate<KeyT>()) {
+      RetreatPastEmptyBuckets();
+      return;
+    }
+    AdvancePastEmptyBuckets();
+  }
+
+  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
+  // for const iterator destinations so it doesn't end up as a user defined copy
+  // constructor.
+  template <bool IsConstSrc,
+            typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
+  DenseMapIterator(
+      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
+      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
+
+  reference operator*() const {
+    assert(isHandleInSync() && "invalid iterator access!");
+    if (shouldReverseIterate<KeyT>())
+      return Ptr[-1];
+    return *Ptr;
+  }
+  pointer operator->() const {
+    assert(isHandleInSync() && "invalid iterator access!");
+    if (shouldReverseIterate<KeyT>())
+      return &(Ptr[-1]);
+    return Ptr;
+  }
+
+  bool operator==(const ConstIterator &RHS) const {
+    assert((!Ptr || isHandleInSync()) && "handle not in sync!");
+    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
+    assert(getEpochAddress() == RHS.getEpochAddress() &&
+           "comparing incomparable iterators!");
+    return Ptr == RHS.Ptr;
+  }
+  bool operator!=(const ConstIterator &RHS) const {
+    assert((!Ptr || isHandleInSync()) && "handle not in sync!");
+    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
+    assert(getEpochAddress() == RHS.getEpochAddress() &&
+           "comparing incomparable iterators!");
+    return Ptr != RHS.Ptr;
+  }
+
+  inline DenseMapIterator& operator++() {  // Preincrement
+    assert(isHandleInSync() && "invalid iterator access!");
+    if (shouldReverseIterate<KeyT>()) {
+      --Ptr;
+      RetreatPastEmptyBuckets();
+      return *this;
+    }
+    ++Ptr;
+    AdvancePastEmptyBuckets();
+    return *this;
+  }
+  DenseMapIterator operator++(int) {  // Postincrement
+    assert(isHandleInSync() && "invalid iterator access!");
+    DenseMapIterator tmp = *this; ++*this; return tmp;
+  }
+
+private:
+  void AdvancePastEmptyBuckets() {
+    assert(Ptr <= End);
+    const KeyT Empty = KeyInfoT::getEmptyKey();
+    const KeyT Tombstone = KeyInfoT::getTombstoneKey();
+
+    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
+                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
+      ++Ptr;
+  }
+
+  void RetreatPastEmptyBuckets() {
+    assert(Ptr >= End);
+    const KeyT Empty = KeyInfoT::getEmptyKey();
+    const KeyT Tombstone = KeyInfoT::getTombstoneKey();
+
+    while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
+                          KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
+      --Ptr;
+  }
+};
+
+template <typename KeyT, typename ValueT, typename KeyInfoT>
+inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
+  return X.getMemorySize();
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DENSEMAP_H
diff --git a/linux-x64/clang/include/llvm/ADT/DenseMapInfo.h b/linux-x64/clang/include/llvm/ADT/DenseMapInfo.h
new file mode 100644
index 0000000..a96904c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/DenseMapInfo.h
@@ -0,0 +1,267 @@
+//===- llvm/ADT/DenseMapInfo.h - Type traits for DenseMap -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines DenseMapInfo traits for DenseMap.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSEMAPINFO_H
+#define LLVM_ADT_DENSEMAPINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+template<typename T>
+struct DenseMapInfo {
+  //static inline T getEmptyKey();
+  //static inline T getTombstoneKey();
+  //static unsigned getHashValue(const T &Val);
+  //static bool isEqual(const T &LHS, const T &RHS);
+};
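+
+// A sketch of a user-provided specialization (MyKey is hypothetical; the
+// empty and tombstone keys must be two distinct values that are never used
+// as real keys):
+//
+//   struct MyKey { int ID; };
+//   template <> struct DenseMapInfo<MyKey> {
+//     static inline MyKey getEmptyKey() { return MyKey{-1}; }
+//     static inline MyKey getTombstoneKey() { return MyKey{-2}; }
+//     static unsigned getHashValue(const MyKey &K) { return K.ID * 37U; }
+//     static bool isEqual(const MyKey &L, const MyKey &R) {
+//       return L.ID == R.ID;
+//     }
+//   };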
+
+// Provide DenseMapInfo for all pointers.
+template<typename T>
+struct DenseMapInfo<T*> {
+  static inline T* getEmptyKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-1);
+    Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
+    return reinterpret_cast<T*>(Val);
+  }
+
+  static inline T* getTombstoneKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-2);
+    Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
+    return reinterpret_cast<T*>(Val);
+  }
+
+  static unsigned getHashValue(const T *PtrVal) {
+    return (unsigned((uintptr_t)PtrVal) >> 4) ^
+           (unsigned((uintptr_t)PtrVal) >> 9);
+  }
+
+  static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; }
+};
+
+// Provide DenseMapInfo for chars.
+template<> struct DenseMapInfo<char> {
+  static inline char getEmptyKey() { return ~0; }
+  static inline char getTombstoneKey() { return ~0 - 1; }
+  static unsigned getHashValue(const char& Val) { return Val * 37U; }
+
+  static bool isEqual(const char &LHS, const char &RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for unsigned shorts.
+template <> struct DenseMapInfo<unsigned short> {
+  static inline unsigned short getEmptyKey() { return 0xFFFF; }
+  static inline unsigned short getTombstoneKey() { return 0xFFFF - 1; }
+  static unsigned getHashValue(const unsigned short &Val) { return Val * 37U; }
+
+  static bool isEqual(const unsigned short &LHS, const unsigned short &RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for unsigned ints.
+template<> struct DenseMapInfo<unsigned> {
+  static inline unsigned getEmptyKey() { return ~0U; }
+  static inline unsigned getTombstoneKey() { return ~0U - 1; }
+  static unsigned getHashValue(const unsigned& Val) { return Val * 37U; }
+
+  static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for unsigned longs.
+template<> struct DenseMapInfo<unsigned long> {
+  static inline unsigned long getEmptyKey() { return ~0UL; }
+  static inline unsigned long getTombstoneKey() { return ~0UL - 1L; }
+
+  static unsigned getHashValue(const unsigned long& Val) {
+    return (unsigned)(Val * 37UL);
+  }
+
+  static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for unsigned long longs.
+template<> struct DenseMapInfo<unsigned long long> {
+  static inline unsigned long long getEmptyKey() { return ~0ULL; }
+  static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
+
+  static unsigned getHashValue(const unsigned long long& Val) {
+    return (unsigned)(Val * 37ULL);
+  }
+
+  static bool isEqual(const unsigned long long& LHS,
+                      const unsigned long long& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for shorts.
+template <> struct DenseMapInfo<short> {
+  static inline short getEmptyKey() { return 0x7FFF; }
+  static inline short getTombstoneKey() { return -0x7FFF - 1; }
+  static unsigned getHashValue(const short &Val) { return Val * 37U; }
+  static bool isEqual(const short &LHS, const short &RHS) { return LHS == RHS; }
+};
+
+// Provide DenseMapInfo for ints.
+template<> struct DenseMapInfo<int> {
+  static inline int getEmptyKey() { return 0x7fffffff; }
+  static inline int getTombstoneKey() { return -0x7fffffff - 1; }
+  static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37U); }
+
+  static bool isEqual(const int& LHS, const int& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for longs.
+template<> struct DenseMapInfo<long> {
+  static inline long getEmptyKey() {
+    return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
+  }
+
+  static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
+
+  static unsigned getHashValue(const long& Val) {
+    return (unsigned)(Val * 37UL);
+  }
+
+  static bool isEqual(const long& LHS, const long& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for long longs.
+template<> struct DenseMapInfo<long long> {
+  static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
+  static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; }
+
+  static unsigned getHashValue(const long long& Val) {
+    return (unsigned)(Val * 37ULL);
+  }
+
+  static bool isEqual(const long long& LHS,
+                      const long long& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for all pairs whose members have info.
+template<typename T, typename U>
+struct DenseMapInfo<std::pair<T, U>> {
+  using Pair = std::pair<T, U>;
+  using FirstInfo = DenseMapInfo<T>;
+  using SecondInfo = DenseMapInfo<U>;
+
+  static inline Pair getEmptyKey() {
+    return std::make_pair(FirstInfo::getEmptyKey(),
+                          SecondInfo::getEmptyKey());
+  }
+
+  static inline Pair getTombstoneKey() {
+    return std::make_pair(FirstInfo::getTombstoneKey(),
+                          SecondInfo::getTombstoneKey());
+  }
+
+  static unsigned getHashValue(const Pair& PairVal) {
+    uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32
+          | (uint64_t)SecondInfo::getHashValue(PairVal.second);
+    key += ~(key << 32);
+    key ^= (key >> 22);
+    key += ~(key << 13);
+    key ^= (key >> 8);
+    key += (key << 3);
+    key ^= (key >> 15);
+    key += ~(key << 27);
+    key ^= (key >> 31);
+    return (unsigned)key;
+  }
+
+  static bool isEqual(const Pair &LHS, const Pair &RHS) {
+    return FirstInfo::isEqual(LHS.first, RHS.first) &&
+           SecondInfo::isEqual(LHS.second, RHS.second);
+  }
+};
+
+// Provide DenseMapInfo for StringRefs.
+template <> struct DenseMapInfo<StringRef> {
+  static inline StringRef getEmptyKey() {
+    return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(0)),
+                     0);
+  }
+
+  static inline StringRef getTombstoneKey() {
+    return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(1)),
+                     0);
+  }
+
+  static unsigned getHashValue(StringRef Val) {
+    assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
+    assert(Val.data() != getTombstoneKey().data() &&
+           "Cannot hash the tombstone key!");
+    return (unsigned)(hash_value(Val));
+  }
+
+  static bool isEqual(StringRef LHS, StringRef RHS) {
+    if (RHS.data() == getEmptyKey().data())
+      return LHS.data() == getEmptyKey().data();
+    if (RHS.data() == getTombstoneKey().data())
+      return LHS.data() == getTombstoneKey().data();
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for ArrayRefs.
+template <typename T> struct DenseMapInfo<ArrayRef<T>> {
+  static inline ArrayRef<T> getEmptyKey() {
+    return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(0)),
+                       size_t(0));
+  }
+
+  static inline ArrayRef<T> getTombstoneKey() {
+    return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(1)),
+                       size_t(0));
+  }
+
+  static unsigned getHashValue(ArrayRef<T> Val) {
+    assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
+    assert(Val.data() != getTombstoneKey().data() &&
+           "Cannot hash the tombstone key!");
+    return (unsigned)(hash_value(Val));
+  }
+
+  static bool isEqual(ArrayRef<T> LHS, ArrayRef<T> RHS) {
+    if (RHS.data() == getEmptyKey().data())
+      return LHS.data() == getEmptyKey().data();
+    if (RHS.data() == getTombstoneKey().data())
+      return LHS.data() == getTombstoneKey().data();
+    return LHS == RHS;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DENSEMAPINFO_H
diff --git a/linux-x64/clang/include/llvm/ADT/DenseSet.h b/linux-x64/clang/include/llvm/ADT/DenseSet.h
new file mode 100644
index 0000000..7e5171c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/DenseSet.h
@@ -0,0 +1,255 @@
+//===- llvm/ADT/DenseSet.h - Dense probed hash table ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DenseSet and SmallDenseSet classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSESET_H
+#define LLVM_ADT_DENSESET_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+namespace detail {
+
+struct DenseSetEmpty {};
+
+// Use the empty base class trick so we can create a DenseMap where the buckets
+// contain only a single item.
+template <typename KeyT> class DenseSetPair : public DenseSetEmpty {
+  KeyT key;
+
+public:
+  KeyT &getFirst() { return key; }
+  const KeyT &getFirst() const { return key; }
+  DenseSetEmpty &getSecond() { return *this; }
+  const DenseSetEmpty &getSecond() const { return *this; }
+};
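+
+// With the empty base class trick a bucket is just the key; on common ABIs
+// (illustrative, and checked by the static_assert in DenseSetImpl below):
+//
+//   static_assert(sizeof(DenseSetPair<int>) == sizeof(int),
+//                 "set buckets carry no per-value overhead");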
+
+/// Base class for DenseSet and SmallDenseSet.
+///
+/// MapTy should be either
+///
+///   DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+///            detail::DenseSetPair<ValueT>>
+///
+/// or the equivalent SmallDenseMap type.  ValueInfoT must implement the
+/// DenseMapInfo "concept".
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+class DenseSetImpl {
+  static_assert(sizeof(typename MapTy::value_type) == sizeof(ValueT),
+                "DenseMap buckets unexpectedly large!");
+  MapTy TheMap;
+
+  template <typename T>
+  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
+
+public:
+  using key_type = ValueT;
+  using value_type = ValueT;
+  using size_type = unsigned;
+
+  explicit DenseSetImpl(unsigned InitialReserve = 0) : TheMap(InitialReserve) {}
+
+  DenseSetImpl(std::initializer_list<ValueT> Elems)
+      : DenseSetImpl(Elems.size()) {
+    insert(Elems.begin(), Elems.end());
+  }
+
+  bool empty() const { return TheMap.empty(); }
+  size_type size() const { return TheMap.size(); }
+  size_t getMemorySize() const { return TheMap.getMemorySize(); }
+
+  /// Grow the DenseSet so that it has at least Size buckets. Will not shrink
+  /// the number of buckets.
+  void resize(size_t Size) { TheMap.resize(Size); }
+
+  /// Grow the DenseSet so that it can contain at least \p Size items
+  /// before resizing again.
+  void reserve(size_t Size) { TheMap.reserve(Size); }
+
+  void clear() {
+    TheMap.clear();
+  }
+
+  /// Return 1 if the specified key is in the set, 0 otherwise.
+  size_type count(const_arg_type_t<ValueT> V) const {
+    return TheMap.count(V);
+  }
+
+  bool erase(const ValueT &V) {
+    return TheMap.erase(V);
+  }
+
+  void swap(DenseSetImpl &RHS) { TheMap.swap(RHS.TheMap); }
+
+  // Iterators.
+
+  class ConstIterator;
+
+  class Iterator {
+    typename MapTy::iterator I;
+    friend class DenseSetImpl;
+    friend class ConstIterator;
+
+  public:
+    using difference_type = typename MapTy::iterator::difference_type;
+    using value_type = ValueT;
+    using pointer = value_type *;
+    using reference = value_type &;
+    using iterator_category = std::forward_iterator_tag;
+
+    Iterator() = default;
+    Iterator(const typename MapTy::iterator &i) : I(i) {}
+
+    ValueT &operator*() { return I->getFirst(); }
+    const ValueT &operator*() const { return I->getFirst(); }
+    ValueT *operator->() { return &I->getFirst(); }
+    const ValueT *operator->() const { return &I->getFirst(); }
+
+    Iterator& operator++() { ++I; return *this; }
+    Iterator operator++(int) { auto T = *this; ++I; return T; }
+    bool operator==(const ConstIterator& X) const { return I == X.I; }
+    bool operator!=(const ConstIterator& X) const { return I != X.I; }
+  };
+
+  class ConstIterator {
+    typename MapTy::const_iterator I;
+    friend class DenseSetImpl;
+    friend class Iterator;
+
+  public:
+    using difference_type = typename MapTy::const_iterator::difference_type;
+    using value_type = ValueT;
+    using pointer = value_type *;
+    using reference = value_type &;
+    using iterator_category = std::forward_iterator_tag;
+
+    ConstIterator() = default;
+    ConstIterator(const Iterator &B) : I(B.I) {}
+    ConstIterator(const typename MapTy::const_iterator &i) : I(i) {}
+
+    const ValueT &operator*() const { return I->getFirst(); }
+    const ValueT *operator->() const { return &I->getFirst(); }
+
+    ConstIterator& operator++() { ++I; return *this; }
+    ConstIterator operator++(int) { auto T = *this; ++I; return T; }
+    bool operator==(const ConstIterator& X) const { return I == X.I; }
+    bool operator!=(const ConstIterator& X) const { return I != X.I; }
+  };
+
+  using iterator = Iterator;
+  using const_iterator = ConstIterator;
+
+  iterator begin() { return Iterator(TheMap.begin()); }
+  iterator end() { return Iterator(TheMap.end()); }
+
+  const_iterator begin() const { return ConstIterator(TheMap.begin()); }
+  const_iterator end() const { return ConstIterator(TheMap.end()); }
+
+  iterator find(const_arg_type_t<ValueT> V) { return Iterator(TheMap.find(V)); }
+  const_iterator find(const_arg_type_t<ValueT> V) const {
+    return ConstIterator(TheMap.find(V));
+  }
+
+  /// Alternative version of find() which allows a different, and possibly less
+  /// expensive, key type.
+  /// The DenseMapInfo is responsible for supplying methods
+  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key type
+  /// used.
+  template <class LookupKeyT>
+  iterator find_as(const LookupKeyT &Val) {
+    return Iterator(TheMap.find_as(Val));
+  }
+  template <class LookupKeyT>
+  const_iterator find_as(const LookupKeyT &Val) const {
+    return ConstIterator(TheMap.find_as(Val));
+  }
+
+  void erase(Iterator I) { return TheMap.erase(I.I); }
+  void erase(ConstIterator CI) { return TheMap.erase(CI.I); }
+
+  std::pair<iterator, bool> insert(const ValueT &V) {
+    detail::DenseSetEmpty Empty;
+    return TheMap.try_emplace(V, Empty);
+  }
+
+  std::pair<iterator, bool> insert(ValueT &&V) {
+    detail::DenseSetEmpty Empty;
+    return TheMap.try_emplace(std::move(V), Empty);
+  }
+
+  /// Alternative version of insert that uses a different (and possibly less
+  /// expensive) key type.
+  template <typename LookupKeyT>
+  std::pair<iterator, bool> insert_as(const ValueT &V,
+                                      const LookupKeyT &LookupKey) {
+    return TheMap.insert_as({V, detail::DenseSetEmpty()}, LookupKey);
+  }
+  template <typename LookupKeyT>
+  std::pair<iterator, bool> insert_as(ValueT &&V, const LookupKeyT &LookupKey) {
+    return TheMap.insert_as({std::move(V), detail::DenseSetEmpty()}, LookupKey);
+  }
+
+  // Range insertion of values.
+  template<typename InputIt>
+  void insert(InputIt I, InputIt E) {
+    for (; I != E; ++I)
+      insert(*I);
+  }
+};
+
+} // end namespace detail
+
+/// Implements a dense probed hash-table based set.
+template <typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT>>
+class DenseSet : public detail::DenseSetImpl<
+                     ValueT, DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+                                      detail::DenseSetPair<ValueT>>,
+                     ValueInfoT> {
+  using BaseT =
+      detail::DenseSetImpl<ValueT,
+                           DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+                                    detail::DenseSetPair<ValueT>>,
+                           ValueInfoT>;
+
+public:
+  using BaseT::BaseT;
+};
+
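+// Usage sketch for DenseSet (illustrative):
+//
+//   llvm::DenseSet<unsigned> S;
+//   S.insert(3u);
+//   if (S.count(3u))
+//     S.erase(3u);
+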
+/// Implements a dense probed hash-table based set with some number of buckets
+/// stored inline.
+template <typename ValueT, unsigned InlineBuckets = 4,
+          typename ValueInfoT = DenseMapInfo<ValueT>>
+class SmallDenseSet
+    : public detail::DenseSetImpl<
+          ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
+                                ValueInfoT, detail::DenseSetPair<ValueT>>,
+          ValueInfoT> {
+  using BaseT = detail::DenseSetImpl<
+      ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
+                            ValueInfoT, detail::DenseSetPair<ValueT>>,
+      ValueInfoT>;
+
+public:
+  using BaseT::BaseT;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DENSESET_H
diff --git a/linux-x64/clang/include/llvm/ADT/DepthFirstIterator.h b/linux-x64/clang/include/llvm/ADT/DepthFirstIterator.h
new file mode 100644
index 0000000..e964d7f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/DepthFirstIterator.h
@@ -0,0 +1,308 @@
+//===- llvm/ADT/DepthFirstIterator.h - Depth First iterator -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file builds on the ADT/GraphTraits.h file to provide a generic depth-
+// first graph iterator.  It exposes the following functions/types:
+//
+// df_begin/df_end/df_iterator
+//   * Normal depth-first iteration - visit a node and then all of its children.
+//
+// idf_begin/idf_end/idf_iterator
+//   * Depth-first iteration on the 'inverse' graph.
+//
+// df_ext_begin/df_ext_end/df_ext_iterator
+//   * Normal depth-first iteration - visit a node and then all of its children.
+//     This iterator stores the 'visited' set in an external set, which allows
+//     it to be more efficient, and allows external clients to use the set for
+//     other purposes.
+//
+// idf_ext_begin/idf_ext_end/idf_ext_iterator
+//   * Depth-first iteration on the 'inverse' graph.
+//     This iterator stores the 'visited' set in an external set, which allows
+//     it to be more efficient, and allows external clients to use the set for
+//     other purposes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DEPTHFIRSTITERATOR_H
+#define LLVM_ADT_DEPTHFIRSTITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include <iterator>
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+// df_iterator_storage - A private class which is used to figure out where to
+// store the visited set.
+template<class SetType, bool External>   // Non-external set
+class df_iterator_storage {
+public:
+  SetType Visited;
+};
+
+template<class SetType>
+class df_iterator_storage<SetType, true> {
+public:
+  df_iterator_storage(SetType &VSet) : Visited(VSet) {}
+  df_iterator_storage(const df_iterator_storage &S) : Visited(S.Visited) {}
+
+  SetType &Visited;
+};
+
+// The visited state for the iteration is a simple set augmented with
+// one more method, completed, which is invoked when all children of a
+// node have been processed. It is intended to distinguish between back and
+// cross edges in the spanning tree but is not used in the common case.
+template <typename NodeRef, unsigned SmallSize=8>
+struct df_iterator_default_set : public SmallPtrSet<NodeRef, SmallSize> {
+  using BaseSet = SmallPtrSet<NodeRef, SmallSize>;
+  using iterator = typename BaseSet::iterator;
+
+  std::pair<iterator,bool> insert(NodeRef N) { return BaseSet::insert(N); }
+  template <typename IterT>
+  void insert(IterT Begin, IterT End) { BaseSet::insert(Begin,End); }
+
+  void completed(NodeRef) {}
+};
+
+// Generic Depth First Iterator
+template <class GraphT,
+          class SetType =
+              df_iterator_default_set<typename GraphTraits<GraphT>::NodeRef>,
+          bool ExtStorage = false, class GT = GraphTraits<GraphT>>
+class df_iterator
+    : public std::iterator<std::forward_iterator_tag, typename GT::NodeRef>,
+      public df_iterator_storage<SetType, ExtStorage> {
+  using super = std::iterator<std::forward_iterator_tag, typename GT::NodeRef>;
+  using NodeRef = typename GT::NodeRef;
+  using ChildItTy = typename GT::ChildIteratorType;
+
+  // First element is the node reference, second is the 'next child' to visit.
+  // The second element is initialized lazily to pick up graph changes during
+  // the DFS.
+  using StackElement = std::pair<NodeRef, Optional<ChildItTy>>;
+
+  // VisitStack - Used to maintain the ordering.  Top = current node.
+  std::vector<StackElement> VisitStack;
+
+private:
+  inline df_iterator(NodeRef Node) {
+    this->Visited.insert(Node);
+    VisitStack.push_back(StackElement(Node, None));
+  }
+
+  inline df_iterator() = default; // End is when stack is empty
+
+  inline df_iterator(NodeRef Node, SetType &S)
+      : df_iterator_storage<SetType, ExtStorage>(S) {
+    if (this->Visited.insert(Node).second)
+      VisitStack.push_back(StackElement(Node, None));
+  }
+
+  inline df_iterator(SetType &S)
+    : df_iterator_storage<SetType, ExtStorage>(S) {
+    // End is when stack is empty
+  }
+
+  inline void toNext() {
+    do {
+      NodeRef Node = VisitStack.back().first;
+      Optional<ChildItTy> &Opt = VisitStack.back().second;
+
+      if (!Opt)
+        Opt.emplace(GT::child_begin(Node));
+
+      // Notice that we directly mutate *Opt here, so that
+      // VisitStack.back().second actually gets updated as the iterator
+      // increases.
+      while (*Opt != GT::child_end(Node)) {
+        NodeRef Next = *(*Opt)++;
+        // Has our next sibling been visited?
+        if (this->Visited.insert(Next).second) {
+          // No, do it now.
+          VisitStack.push_back(StackElement(Next, None));
+          return;
+        }
+      }
+      this->Visited.completed(Node);
+
+      // Oops, ran out of successors... go up a level on the stack.
+      VisitStack.pop_back();
+    } while (!VisitStack.empty());
+  }
+
+public:
+  using pointer = typename super::pointer;
+
+  // Provide static begin and end methods as our public "constructors"
+  static df_iterator begin(const GraphT &G) {
+    return df_iterator(GT::getEntryNode(G));
+  }
+  static df_iterator end(const GraphT &G) { return df_iterator(); }
+
+  // Static begin and end methods as our public ctors for external iterators
+  static df_iterator begin(const GraphT &G, SetType &S) {
+    return df_iterator(GT::getEntryNode(G), S);
+  }
+  static df_iterator end(const GraphT &G, SetType &S) { return df_iterator(S); }
+
+  bool operator==(const df_iterator &x) const {
+    return VisitStack == x.VisitStack;
+  }
+  bool operator!=(const df_iterator &x) const { return !(*this == x); }
+
+  const NodeRef &operator*() const { return VisitStack.back().first; }
+
+  // This is a nonstandard operator-> that dereferences the pointer an extra
+  // time... so that you can actually call methods ON the Node, because
+  // the contained type is a pointer.  This allows, e.g., BBIt->getTerminator().
+  //
+  NodeRef operator->() const { return **this; }
+
+  df_iterator &operator++() { // Preincrement
+    toNext();
+    return *this;
+  }
+
+  /// \brief Skips all children of the current node and traverses to the next
+  /// node.
+  ///
+  /// Note: This function takes care of incrementing the iterator. If you
+  /// always increment and call this function, you risk walking off the end.
+  df_iterator &skipChildren() {
+    VisitStack.pop_back();
+    if (!VisitStack.empty())
+      toNext();
+    return *this;
+  }
+
+  df_iterator operator++(int) { // Postincrement
+    df_iterator tmp = *this;
+    ++*this;
+    return tmp;
+  }
+
+  // nodeVisited - return true if this iterator has already visited the
+  // specified node.  This is public, and will probably be used to iterate over
+  // nodes that a depth first iteration did not find: i.e., unreachable nodes.
+  //
+  bool nodeVisited(NodeRef Node) const {
+    return this->Visited.count(Node) != 0;
+  }
+
+  /// getPathLength - Return the length of the path from the entry node to the
+  /// current node, counting both nodes.
+  unsigned getPathLength() const { return VisitStack.size(); }
+
+  /// getPath - Return the n'th node in the path from the entry node to the
+  /// current node.
+  NodeRef getPath(unsigned n) const { return VisitStack[n].first; }
+};
+
+// Provide global constructors that automatically figure out correct types...
+//
+template <class T>
+df_iterator<T> df_begin(const T& G) {
+  return df_iterator<T>::begin(G);
+}
+
+template <class T>
+df_iterator<T> df_end(const T& G) {
+  return df_iterator<T>::end(G);
+}
+
+// Provide an accessor method to use them in range-based patterns.
+template <class T>
+iterator_range<df_iterator<T>> depth_first(const T& G) {
+  return make_range(df_begin(G), df_end(G));
+}
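+
+// Usage sketch (G is a hypothetical graph object whose type has a GraphTraits
+// specialization; visit() is a placeholder):
+//
+//   for (auto *N : depth_first(G))
+//     visit(N);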
+
+// Provide global definitions of external depth first iterators...
+template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeRef>>
+struct df_ext_iterator : public df_iterator<T, SetTy, true> {
+  df_ext_iterator(const df_iterator<T, SetTy, true> &V)
+    : df_iterator<T, SetTy, true>(V) {}
+};
+
+template <class T, class SetTy>
+df_ext_iterator<T, SetTy> df_ext_begin(const T& G, SetTy &S) {
+  return df_ext_iterator<T, SetTy>::begin(G, S);
+}
+
+template <class T, class SetTy>
+df_ext_iterator<T, SetTy> df_ext_end(const T& G, SetTy &S) {
+  return df_ext_iterator<T, SetTy>::end(G, S);
+}
+
+template <class T, class SetTy>
+iterator_range<df_ext_iterator<T, SetTy>> depth_first_ext(const T& G,
+                                                          SetTy &S) {
+  return make_range(df_ext_begin(G, S), df_ext_end(G, S));
+}
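+
+// Because the caller owns the visited set, it can be pre-seeded or shared
+// across traversals; nodes already present in the set are skipped.  A sketch,
+// again assuming the CFG GraphTraits from llvm/IR/CFG.h:
+//
+//   df_iterator_default_set<BasicBlock *> Visited;
+//   for (BasicBlock *BB : depth_first_ext(&F, Visited))
+//     ...;
+//   // A later depth_first_ext(&F2, Visited) would skip the blocks above.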
+
+// Provide global definitions of inverse depth first iterators...
+template <class T,
+          class SetTy =
+              df_iterator_default_set<typename GraphTraits<T>::NodeRef>,
+          bool External = false>
+struct idf_iterator : public df_iterator<Inverse<T>, SetTy, External> {
+  idf_iterator(const df_iterator<Inverse<T>, SetTy, External> &V)
+    : df_iterator<Inverse<T>, SetTy, External>(V) {}
+};
+
+template <class T>
+idf_iterator<T> idf_begin(const T& G) {
+  return idf_iterator<T>::begin(Inverse<T>(G));
+}
+
+template <class T>
+idf_iterator<T> idf_end(const T& G){
+  return idf_iterator<T>::end(Inverse<T>(G));
+}
+
+// Provide an accessor method to use them in range-based patterns.
+template <class T>
+iterator_range<idf_iterator<T>> inverse_depth_first(const T& G) {
+  return make_range(idf_begin(G), idf_end(G));
+}
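+
+// For example, with GraphTraits<Inverse<BasicBlock *>> in scope
+// (llvm/IR/CFG.h), this walks predecessor edges backwards from a given block
+// (an illustrative sketch):
+//
+//   for (BasicBlock *BB : inverse_depth_first(ExitBB))
+//     ...;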
+
+// Provide global definitions of external inverse depth first iterators...
+template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeRef>>
+struct idf_ext_iterator : public idf_iterator<T, SetTy, true> {
+  idf_ext_iterator(const idf_iterator<T, SetTy, true> &V)
+    : idf_iterator<T, SetTy, true>(V) {}
+  idf_ext_iterator(const df_iterator<Inverse<T>, SetTy, true> &V)
+    : idf_iterator<T, SetTy, true>(V) {}
+};
+
+template <class T, class SetTy>
+idf_ext_iterator<T, SetTy> idf_ext_begin(const T& G, SetTy &S) {
+  return idf_ext_iterator<T, SetTy>::begin(Inverse<T>(G), S);
+}
+
+template <class T, class SetTy>
+idf_ext_iterator<T, SetTy> idf_ext_end(const T& G, SetTy &S) {
+  return idf_ext_iterator<T, SetTy>::end(Inverse<T>(G), S);
+}
+
+template <class T, class SetTy>
+iterator_range<idf_ext_iterator<T, SetTy>> inverse_depth_first_ext(const T& G,
+                                                                   SetTy &S) {
+  return make_range(idf_ext_begin(G, S), idf_ext_end(G, S));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DEPTHFIRSTITERATOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/EpochTracker.h b/linux-x64/clang/include/llvm/ADT/EpochTracker.h
new file mode 100644
index 0000000..db39ba4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/EpochTracker.h
@@ -0,0 +1,100 @@
+//===- llvm/ADT/EpochTracker.h - ADT epoch tracking --------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DebugEpochBase and DebugEpochBase::HandleBase classes.
+// These can be used to write iterators that are fail-fast when LLVM is built
+// with asserts enabled.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EPOCH_TRACKER_H
+#define LLVM_ADT_EPOCH_TRACKER_H
+
+#include "llvm/Config/abi-breaking.h"
+#include "llvm/Config/llvm-config.h"
+
+#include <cstdint>
+
+namespace llvm {
+
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+
+/// \brief A base class for data structure classes wishing to make iterators
+/// ("handles") pointing into themselves fail-fast.  When building without
+/// asserts, this class is empty and does nothing.
+///
+/// DebugEpochBase does not by itself track handles pointing into itself.  The
+/// expectation is that routines touching the handles will poll on
+/// isHandleInSync at appropriate points to assert that the handle they're using
+/// is still valid.
+///
+class DebugEpochBase {
+  uint64_t Epoch;
+
+public:
+  DebugEpochBase() : Epoch(0) {}
+
+  /// \brief Calling incrementEpoch invalidates all handles pointing into the
+  /// calling instance.
+  void incrementEpoch() { ++Epoch; }
+
+  /// \brief The destructor calls incrementEpoch to make use-after-free bugs
+  /// more likely to crash deterministically.
+  ~DebugEpochBase() { incrementEpoch(); }
+
+  /// \brief A base class for iterator classes ("handles") that wish to poll for
+  /// iterator invalidating modifications in the underlying data structure.
+  /// When LLVM is built without asserts, this class is empty and does nothing.
+  ///
+  /// HandleBase does not track the parent data structure by itself.  It expects
+  /// the routines modifying the data structure to call incrementEpoch when they
+  /// make an iterator-invalidating modification.
+  ///
+  class HandleBase {
+    const uint64_t *EpochAddress;
+    uint64_t EpochAtCreation;
+
+  public:
+    HandleBase() : EpochAddress(nullptr), EpochAtCreation(UINT64_MAX) {}
+
+    explicit HandleBase(const DebugEpochBase *Parent)
+        : EpochAddress(&Parent->Epoch), EpochAtCreation(Parent->Epoch) {}
+
+    /// \brief Returns true if the DebugEpochBase this Handle is linked to has
+    /// not called incrementEpoch on itself since the creation of this
+    /// HandleBase instance.
+    bool isHandleInSync() const { return *EpochAddress == EpochAtCreation; }
+
+    /// \brief Returns a pointer to the epoch word stored in the data structure
+    /// this handle points into.  Can be used to check if two iterators point
+    /// into the same data structure.
+    const void *getEpochAddress() const { return EpochAddress; }
+  };
+};
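+
+// A minimal sketch of the intended pattern (MyContainer and its members are
+// hypothetical, not part of LLVM): the container derives from DebugEpochBase
+// and bumps the epoch on every iterator-invalidating mutation, while its
+// iterator derives from HandleBase and asserts isHandleInSync() before use:
+//
+//   class MyContainer : public DebugEpochBase {
+//     std::vector<int> Storage;
+//   public:
+//     void clear() { incrementEpoch(); Storage.clear(); }
+//     class iterator : public DebugEpochBase::HandleBase {
+//       int *Ptr;
+//     public:
+//       iterator(const MyContainer *C, int *P) : HandleBase(C), Ptr(P) {}
+//       int &operator*() const {
+//         assert(isHandleInSync() && "iterator invalidated!");
+//         return *Ptr;
+//       }
+//     };
+//   };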
+
+#else
+
+class DebugEpochBase {
+public:
+  void incrementEpoch() {}
+
+  class HandleBase {
+  public:
+    HandleBase() = default;
+    explicit HandleBase(const DebugEpochBase *) {}
+    bool isHandleInSync() const { return true; }
+    const void *getEpochAddress() const { return nullptr; }
+  };
+};
+
+#endif // LLVM_ENABLE_ABI_BREAKING_CHECKS
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/EquivalenceClasses.h b/linux-x64/clang/include/llvm/ADT/EquivalenceClasses.h
new file mode 100644
index 0000000..e3f4843
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/EquivalenceClasses.h
@@ -0,0 +1,298 @@
+//===- llvm/ADT/EquivalenceClasses.h - Generic Equiv. Classes ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic implementation of equivalence classes through the use of Tarjan's
+// efficient union-find algorithm.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EQUIVALENCECLASSES_H
+#define LLVM_ADT_EQUIVALENCECLASSES_H
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <set>
+
+namespace llvm {
+
+/// EquivalenceClasses - This represents a collection of equivalence classes and
+/// supports three efficient operations: insert an element into a class of its
+/// own, union two classes, and find the class for a given element.  In
+/// addition to these modification methods, it is possible to iterate over all
+/// of the equivalence classes and all of the elements in a class.
+///
+/// This is an efficient implementation that only stores one copy of the
+/// element being indexed per entry in the set, and allows any arbitrary type
+/// to be indexed (as long as it can be ordered with operator<).
+///
+/// Here is a simple example using integers:
+///
+/// \code
+///  EquivalenceClasses<int> EC;
+///  EC.unionSets(1, 2);                // insert 1, 2 into the same set
+///  EC.insert(4); EC.insert(5);        // insert 4, 5 into own sets
+///  EC.unionSets(5, 1);                // merge the set for 1 with 5's set.
+///
+///  for (EquivalenceClasses<int>::iterator I = EC.begin(), E = EC.end();
+///       I != E; ++I) {           // Iterate over all of the equivalence sets.
+///    if (!I->isLeader()) continue;   // Ignore non-leader sets.
+///    for (EquivalenceClasses<int>::member_iterator MI = EC.member_begin(I);
+///         MI != EC.member_end(); ++MI)   // Loop over members in this set.
+///      cerr << *MI << " ";  // Print member.
+///    cerr << "\n";   // Finish set.
+///  }
+/// \endcode
+///
+/// This example prints:
+///   4
+///   5 1 2
+///
+template <class ElemTy>
+class EquivalenceClasses {
+  /// ECValue - The EquivalenceClasses data structure is just a set of these.
+  /// Each of these represents a relation for a value.  First it stores the
+  /// value itself, which provides the ordering that the set queries.  Next, it
+  /// provides a "next pointer", which is used to enumerate all of the elements
+  /// in the unioned set.  Finally, it defines either an "end of list pointer"
+  /// or a "leader pointer" depending on whether the value itself is a leader.
+  /// A "leader pointer" points to the node that is the leader for this
+  /// element, if the node is not a leader.  An "end of list pointer" points to
+  /// the last node in the list of members of this set.  Whether or not a node
+  /// is a leader is determined by a bit stolen from one of the pointers.
+  class ECValue {
+    friend class EquivalenceClasses;
+
+    mutable const ECValue *Leader, *Next;
+    ElemTy Data;
+
+    // ECValue ctor - Start out with EndOfList pointing to this node, Next is
+    // Null, isLeader = true.
+    ECValue(const ElemTy &Elt)
+      : Leader(this), Next((ECValue*)(intptr_t)1), Data(Elt) {}
+
+    const ECValue *getLeader() const {
+      if (isLeader()) return this;
+      if (Leader->isLeader()) return Leader;
+      // Path compression.
+      return Leader = Leader->getLeader();
+    }
+
+    const ECValue *getEndOfList() const {
+      assert(isLeader() && "Cannot get the end of a list for a non-leader!");
+      return Leader;
+    }
+
+    void setNext(const ECValue *NewNext) const {
+      assert(getNext() == nullptr && "Already has a next pointer!");
+      Next = (const ECValue*)((intptr_t)NewNext | (intptr_t)isLeader());
+    }
+
+  public:
+    ECValue(const ECValue &RHS) : Leader(this), Next((ECValue*)(intptr_t)1),
+                                  Data(RHS.Data) {
+      // Only support copying of singleton nodes.
+      assert(RHS.isLeader() && RHS.getNext() == nullptr && "Not a singleton!");
+    }
+
+    bool operator<(const ECValue &UFN) const { return Data < UFN.Data; }
+
+    bool isLeader() const { return (intptr_t)Next & 1; }
+    const ElemTy &getData() const { return Data; }
+
+    const ECValue *getNext() const {
+      return (ECValue*)((intptr_t)Next & ~(intptr_t)1);
+    }
+
+    template<typename T>
+    bool operator<(const T &Val) const { return Data < Val; }
+  };
+
+  /// TheMapping - This implicitly provides a mapping from ElemTy values to the
+  /// ECValues; it just keeps the key as part of the value.
+  std::set<ECValue> TheMapping;
+
+public:
+  EquivalenceClasses() = default;
+  EquivalenceClasses(const EquivalenceClasses &RHS) {
+    operator=(RHS);
+  }
+
+  const EquivalenceClasses &operator=(const EquivalenceClasses &RHS) {
+    TheMapping.clear();
+    for (iterator I = RHS.begin(), E = RHS.end(); I != E; ++I)
+      if (I->isLeader()) {
+        member_iterator MI = RHS.member_begin(I);
+        member_iterator LeaderIt = member_begin(insert(*MI));
+        for (++MI; MI != member_end(); ++MI)
+          unionSets(LeaderIt, member_begin(insert(*MI)));
+      }
+    return *this;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Inspection methods
+  //
+
+  /// iterator* - Provides a way to iterate over all values in the set.
+  using iterator = typename std::set<ECValue>::const_iterator;
+
+  iterator begin() const { return TheMapping.begin(); }
+  iterator end() const { return TheMapping.end(); }
+
+  bool empty() const { return TheMapping.empty(); }
+
+  /// member_* - Iterate over the members of an equivalence class.
+  class member_iterator;
+  member_iterator member_begin(iterator I) const {
+    // Only leaders provide anything to iterate over.
+    return member_iterator(I->isLeader() ? &*I : nullptr);
+  }
+  member_iterator member_end() const {
+    return member_iterator(nullptr);
+  }
+
+  /// findValue - Return an iterator to the specified value.  If it does not
+  /// exist, end() is returned.
+  iterator findValue(const ElemTy &V) const {
+    return TheMapping.find(V);
+  }
+
+  /// getLeaderValue - Return the leader for the specified value that is in the
+  /// set.  It is an error to call this method for a value that is not yet in
+  /// the set.  For that, call getOrInsertLeaderValue(V).
+  const ElemTy &getLeaderValue(const ElemTy &V) const {
+    member_iterator MI = findLeader(V);
+    assert(MI != member_end() && "Value is not in the set!");
+    return *MI;
+  }
+
+  /// getOrInsertLeaderValue - Return the leader for the specified value that is
+  /// in the set.  If the member is not in the set, it is inserted, then
+  /// returned.
+  const ElemTy &getOrInsertLeaderValue(const ElemTy &V) {
+    member_iterator MI = findLeader(insert(V));
+    assert(MI != member_end() && "Value is not in the set!");
+    return *MI;
+  }
+
+  /// getNumClasses - Return the number of equivalence classes in this set.
+  /// Note that this is a linear time operation.
+  unsigned getNumClasses() const {
+    unsigned NC = 0;
+    for (iterator I = begin(), E = end(); I != E; ++I)
+      if (I->isLeader()) ++NC;
+    return NC;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Mutation methods
+
+  /// insert - Insert a new value into the union/find set, ignoring the request
+  /// if the value already exists.
+  iterator insert(const ElemTy &Data) {
+    return TheMapping.insert(ECValue(Data)).first;
+  }
+
+  /// findLeader - Given a value in the set, return a member iterator for the
+  /// equivalence class it is in.  This does the path-compression part that
+  /// makes union-find "union findy".  This returns an end iterator if the value
+  /// is not in the equivalence class.
+  member_iterator findLeader(iterator I) const {
+    if (I == TheMapping.end()) return member_end();
+    return member_iterator(I->getLeader());
+  }
+  member_iterator findLeader(const ElemTy &V) const {
+    return findLeader(TheMapping.find(V));
+  }
+
+  /// unionSets - Merge the two equivalence sets for the specified values,
+  /// inserting
+  /// them if they do not already exist in the equivalence set.
+  member_iterator unionSets(const ElemTy &V1, const ElemTy &V2) {
+    iterator V1I = insert(V1), V2I = insert(V2);
+    return unionSets(findLeader(V1I), findLeader(V2I));
+  }
+  member_iterator unionSets(member_iterator L1, member_iterator L2) {
+    assert(L1 != member_end() && L2 != member_end() && "Illegal inputs!");
+    if (L1 == L2) return L1;   // Unifying the same two sets, noop.
+
+    // Otherwise, this is a real union operation.  Set the end of the L1 list to
+    // point to the L2 leader node.
+    const ECValue &L1LV = *L1.Node, &L2LV = *L2.Node;
+    L1LV.getEndOfList()->setNext(&L2LV);
+
+    // Update L1LV's end of list pointer.
+    L1LV.Leader = L2LV.getEndOfList();
+
+    // Clear L2's leader flag:
+    L2LV.Next = L2LV.getNext();
+
+    // L2's leader is now L1.
+    L2LV.Leader = &L1LV;
+    return L1;
+  }
+
+  // isEquivalent - Return true if V1 is equivalent to V2.  This is the case
+  // if V1 is equal to V2 or if both belong to the same equivalence class.
+  bool isEquivalent(const ElemTy &V1, const ElemTy &V2) const {
+    // Fast path: any element is equivalent to itself.
+    if (V1 == V2)
+      return true;
+    auto It = findLeader(V1);
+    return It != member_end() && It == findLeader(V2);
+  }
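+  // For example, after unionSets(1, 2), isEquivalent(1, 2) and
+  // isEquivalent(2, 2) are true, while isEquivalent(1, 4) is false for a
+  // value 4 that is in its own class or absent from the set entirely.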
+
+  class member_iterator : public std::iterator<std::forward_iterator_tag,
+                                               const ElemTy, ptrdiff_t> {
+    friend class EquivalenceClasses;
+
+    using super = std::iterator<std::forward_iterator_tag,
+                                const ElemTy, ptrdiff_t>;
+
+    const ECValue *Node;
+
+  public:
+    using size_type = size_t;
+    using pointer = typename super::pointer;
+    using reference = typename super::reference;
+
+    explicit member_iterator() = default;
+    explicit member_iterator(const ECValue *N) : Node(N) {}
+
+    reference operator*() const {
+      assert(Node != nullptr && "Dereferencing end()!");
+      return Node->getData();
+    }
+    pointer operator->() const { return &operator*(); }
+
+    member_iterator &operator++() {
+      assert(Node != nullptr && "++'d off the end of the list!");
+      Node = Node->getNext();
+      return *this;
+    }
+
+    member_iterator operator++(int) {    // postincrement operators.
+      member_iterator tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    bool operator==(const member_iterator &RHS) const {
+      return Node == RHS.Node;
+    }
+    bool operator!=(const member_iterator &RHS) const {
+      return Node != RHS.Node;
+    }
+  };
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_EQUIVALENCECLASSES_H
diff --git a/linux-x64/clang/include/llvm/ADT/FoldingSet.h b/linux-x64/clang/include/llvm/ADT/FoldingSet.h
new file mode 100644
index 0000000..e363e69
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/FoldingSet.h
@@ -0,0 +1,762 @@
+//===- llvm/ADT/FoldingSet.h - Uniquing Hash Set ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a hash set that can be used to remove duplication of nodes
+// in a graph.  This code was originally created by Chris Lattner for use with
+// SelectionDAGCSEMap, but was isolated to provide use across the llvm code set.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_FOLDINGSET_H
+#define LLVM_ADT_FOLDINGSET_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+/// This folding set is used for two purposes:
+///   1. Given information about a node we want to create, look up the unique
+///      instance of the node in the set.  If the node already exists, return
+///      it, otherwise return the bucket it should be inserted into.
+///   2. Given a node that has already been created, remove it from the set.
+///
+/// This class is implemented as a single-link chained hash table, where the
+/// "buckets" are actually the nodes themselves (the next pointer is in the
+/// node).  The last node points back to the bucket to simplify node removal.
+///
+/// Any node that is to be included in the folding set must be a subclass of
+/// FoldingSetNode.  The node class must also define a Profile method used to
+/// establish the unique bits of data for the node.  The Profile method is
+/// passed a FoldingSetNodeID object which is used to gather the bits.  Just
+/// call one of the Add* functions defined in the FoldingSetBase::NodeID class.
+/// NOTE: The folding set does not own the nodes; it is the responsibility of
+/// the user to dispose of them.
+///
+/// Eg.
+///    class MyNode : public FoldingSetNode {
+///    private:
+///      std::string Name;
+///      unsigned Value;
+///    public:
+///      MyNode(const char *N, unsigned V) : Name(N), Value(V) {}
+///       ...
+///      void Profile(FoldingSetNodeID &ID) const {
+///        ID.AddString(Name);
+///        ID.AddInteger(Value);
+///      }
+///      ...
+///    };
+///
+/// To define the folding set itself use the FoldingSet template:
+///
+/// Eg.
+///    FoldingSet<MyNode> MyFoldingSet;
+///
+/// Four public methods are available to manipulate the folding set:
+///
+/// 1) If you have an existing node that you want to add to the set but are
+/// unsure whether the node already exists, then call:
+///
+///    MyNode *M = MyFoldingSet.GetOrInsertNode(N);
+///
+/// If the result is equal to the input then the node has been inserted.
+/// Otherwise, the result is the node existing in the folding set, and the
+/// input can be discarded (use the result instead.)
+///
+/// 2) If you are ready to construct a node but want to check if it already
+/// exists, then call FindNodeOrInsertPos with a FoldingSetNodeID of the bits to
+/// check:
+///
+///   FoldingSetNodeID ID;
+///   ID.AddString(Name);
+///   ID.AddInteger(Value);
+///   void *InsertPoint;
+///
+///    MyNode *M = MyFoldingSet.FindNodeOrInsertPos(ID, InsertPoint);
+///
+/// If found then M will be non-NULL, else InsertPoint will point to where it
+/// should be inserted using InsertNode.
+///
+/// 3) If you get a NULL result from FindNodeOrInsertPos then you can insert a
+/// new node with InsertNode:
+///
+///    InsertNode(N, InsertPoint);
+///
+/// 4) Finally, if you want to remove a node from the folding set call:
+///
+///    bool WasRemoved = RemoveNode(N);
+///
+/// The result indicates whether the node existed in the folding set.
+
+class FoldingSetNodeID;
+class StringRef;
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetBase - Implements the folding set functionality.  The main
+/// structure is an array of buckets.  Each bucket is indexed by the hash of
+/// the nodes it contains.  The bucket itself points to the nodes contained
+/// in the bucket via a singly linked list.  The last node in the list points
+/// back to the bucket to facilitate node removal.
+///
+class FoldingSetBase {
+  virtual void anchor(); // Out of line virtual method.
+
+protected:
+  /// Buckets - Array of bucket chains.
+  void **Buckets;
+
+  /// NumBuckets - Length of the Buckets array.  Always a power of 2.
+  unsigned NumBuckets;
+
+  /// NumNodes - Number of nodes in the folding set. Growth occurs when NumNodes
+  /// is greater than twice the number of buckets.
+  unsigned NumNodes;
+
+  explicit FoldingSetBase(unsigned Log2InitSize = 6);
+  FoldingSetBase(FoldingSetBase &&Arg);
+  FoldingSetBase &operator=(FoldingSetBase &&RHS);
+  ~FoldingSetBase();
+
+public:
+  //===--------------------------------------------------------------------===//
+  /// Node - This class is used to maintain the singly linked bucket list in
+  /// a folding set.
+  class Node {
+  private:
+    // NextInFoldingSetBucket - next link in the bucket list.
+    void *NextInFoldingSetBucket = nullptr;
+
+  public:
+    Node() = default;
+
+    // Accessors
+    void *getNextInBucket() const { return NextInFoldingSetBucket; }
+    void SetNextInBucket(void *N) { NextInFoldingSetBucket = N; }
+  };
+
+  /// clear - Remove all nodes from the folding set.
+  void clear();
+
+  /// size - Returns the number of nodes in the folding set.
+  unsigned size() const { return NumNodes; }
+
+  /// empty - Returns true if there are no nodes in the folding set.
+  bool empty() const { return NumNodes == 0; }
+
+  /// reserve - Increase the number of buckets such that adding the
+  /// EltCount-th node won't cause a rebucket operation. reserve is permitted
+  /// to allocate more space than requested by EltCount.
+  void reserve(unsigned EltCount);
+
+  /// capacity - Returns the number of nodes permitted in the folding set
+  /// before a rebucket operation is performed.
+  unsigned capacity() {
+    // We allow a load factor of up to 2.0,
+    // so that means our capacity is NumBuckets * 2
+    return NumBuckets * 2;
+  }
+
+private:
+  /// GrowHashTable - Double the size of the hash table and rehash everything.
+  void GrowHashTable();
+
+  /// GrowBucketCount - resize the hash table and rehash everything.
+  /// NewBucketCount must be a power of two, and must be greater than the old
+  /// bucket count.
+  void GrowBucketCount(unsigned NewBucketCount);
+
+protected:
+  /// GetNodeProfile - Instantiations of the FoldingSet template implement
+  /// this function to gather data bits for the given node.
+  virtual void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const = 0;
+
+  /// NodeEquals - Instantiations of the FoldingSet template implement
+  /// this function to compare the given node with the given ID.
+  virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
+                          FoldingSetNodeID &TempID) const=0;
+
+  /// ComputeNodeHash - Instantiations of the FoldingSet template implement
+  /// this function to compute a hash value for the given node.
+  virtual unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const = 0;
+
+  // The below methods are protected to encourage subclasses to provide a more
+  // type-safe API.
+
+  /// RemoveNode - Remove a node from the folding set, returning true if one
+  /// was removed or false if the node was not in the folding set.
+  bool RemoveNode(Node *N);
+
+  /// GetOrInsertNode - If there is an existing simple Node exactly
+  /// equal to the specified node, return it.  Otherwise, insert 'N' and return
+  /// it instead.
+  Node *GetOrInsertNode(Node *N);
+
+  /// FindNodeOrInsertPos - Look up the node specified by ID.  If it exists,
+  /// return it.  If not, return the insertion token that will make insertion
+  /// faster.
+  Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
+
+  /// InsertNode - Insert the specified node into the folding set, knowing that
+  /// it is not already in the folding set.  InsertPos must be obtained from
+  /// FindNodeOrInsertPos.
+  void InsertNode(Node *N, void *InsertPos);
+};
+
+//===----------------------------------------------------------------------===//
+
+/// DefaultFoldingSetTrait - This class provides default implementations
+/// for FoldingSetTrait implementations.
+template<typename T> struct DefaultFoldingSetTrait {
+  static void Profile(const T &X, FoldingSetNodeID &ID) {
+    X.Profile(ID);
+  }
+  static void Profile(T &X, FoldingSetNodeID &ID) {
+    X.Profile(ID);
+  }
+
+  // Equals - Test if the profile for X would match ID, using TempID
+  // to compute a temporary ID if necessary. The default implementation
+  // just calls Profile and does a regular comparison. Implementations
+  // can override this to provide more efficient implementations.
+  static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
+                            FoldingSetNodeID &TempID);
+
+  // ComputeHash - Compute a hash value for X, using TempID to
+  // compute a temporary ID if necessary. The default implementation
+  // just calls Profile and does a regular hash computation.
+  // Implementations can override this to provide more efficient
+  // implementations.
+  static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID);
+};
+
+/// FoldingSetTrait - This trait class is used to define behavior of how
+/// to "profile" (in the FoldingSet parlance) an object of a given type.
+/// The default behavior is to invoke a 'Profile' method on an object, but
+/// through template specialization the behavior can be tailored for specific
+/// types.  Combined with the FoldingSetNodeWrapper class, one can add objects
+/// to FoldingSets that were not originally designed to have that behavior.
+template<typename T> struct FoldingSetTrait
+  : public DefaultFoldingSetTrait<T> {};
+
+/// DefaultContextualFoldingSetTrait - Like DefaultFoldingSetTrait, but
+/// for ContextualFoldingSets.
+template<typename T, typename Ctx>
+struct DefaultContextualFoldingSetTrait {
+  static void Profile(T &X, FoldingSetNodeID &ID, Ctx Context) {
+    X.Profile(ID, Context);
+  }
+
+  static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
+                            FoldingSetNodeID &TempID, Ctx Context);
+  static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID,
+                                     Ctx Context);
+};
+
+/// ContextualFoldingSetTrait - Like FoldingSetTrait, but for
+/// ContextualFoldingSets.
+template<typename T, typename Ctx> struct ContextualFoldingSetTrait
+  : public DefaultContextualFoldingSetTrait<T, Ctx> {};
+
+//===--------------------------------------------------------------------===//
+/// FoldingSetNodeIDRef - This class describes a reference to an interned
+/// FoldingSetNodeID, which can be useful for storing node id data rather
+/// than using plain FoldingSetNodeIDs, since the 32-element SmallVector
+/// is often much larger than necessary, and the possibility of heap
+/// allocation means it requires a non-trivial destructor call.
+class FoldingSetNodeIDRef {
+  const unsigned *Data = nullptr;
+  size_t Size = 0;
+
+public:
+  FoldingSetNodeIDRef() = default;
+  FoldingSetNodeIDRef(const unsigned *D, size_t S) : Data(D), Size(S) {}
+
+  /// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
+  /// used to lookup the node in the FoldingSetBase.
+  unsigned ComputeHash() const;
+
+  bool operator==(FoldingSetNodeIDRef) const;
+
+  bool operator!=(FoldingSetNodeIDRef RHS) const { return !(*this == RHS); }
+
+  /// Used to compare the "ordering" of two nodes as defined by the
+  /// profiled bits and their ordering defined by memcmp().
+  bool operator<(FoldingSetNodeIDRef) const;
+
+  const unsigned *getData() const { return Data; }
+  size_t getSize() const { return Size; }
+};
+
+//===--------------------------------------------------------------------===//
+/// FoldingSetNodeID - This class is used to gather all the unique data bits of
+/// a node.  When all the bits are gathered this class is used to produce a
+/// hash value for the node.
+class FoldingSetNodeID {
+  /// Bits - Vector of all the data bits that make the node unique.
+  /// Use a SmallVector to avoid a heap allocation in the common case.
+  SmallVector<unsigned, 32> Bits;
+
+public:
+  FoldingSetNodeID() = default;
+
+  FoldingSetNodeID(FoldingSetNodeIDRef Ref)
+    : Bits(Ref.getData(), Ref.getData() + Ref.getSize()) {}
+
+  /// Add* - Add various data types to Bit data.
+  void AddPointer(const void *Ptr);
+  void AddInteger(signed I);
+  void AddInteger(unsigned I);
+  void AddInteger(long I);
+  void AddInteger(unsigned long I);
+  void AddInteger(long long I);
+  void AddInteger(unsigned long long I);
+  void AddBoolean(bool B) { AddInteger(B ? 1U : 0U); }
+  void AddString(StringRef String);
+  void AddNodeID(const FoldingSetNodeID &ID);
+
+  template <typename T>
+  inline void Add(const T &x) { FoldingSetTrait<T>::Profile(x, *this); }
+
+  /// clear - Clear the accumulated profile, allowing this FoldingSetNodeID
+  /// object to be used to compute a new profile.
+  inline void clear() { Bits.clear(); }
+
+  /// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used
+  /// to lookup the node in the FoldingSetBase.
+  unsigned ComputeHash() const;
+
+  /// operator== - Used to compare two nodes to each other.
+  bool operator==(const FoldingSetNodeID &RHS) const;
+  bool operator==(const FoldingSetNodeIDRef RHS) const;
+
+  bool operator!=(const FoldingSetNodeID &RHS) const { return !(*this == RHS); }
+  bool operator!=(const FoldingSetNodeIDRef RHS) const {
+    return !(*this == RHS);
+  }
+
+  /// Used to compare the "ordering" of two nodes as defined by the
+  /// profiled bits and their ordering defined by memcmp().
+  bool operator<(const FoldingSetNodeID &RHS) const;
+  bool operator<(const FoldingSetNodeIDRef RHS) const;
+
+  /// Intern - Copy this node's data to a memory region allocated from the
+  /// given allocator and return a FoldingSetNodeIDRef describing the
+  /// interned data.
+  FoldingSetNodeIDRef Intern(BumpPtrAllocator &Allocator) const;
+};
+
+// Convenience type to hide the implementation of the folding set.
+using FoldingSetNode = FoldingSetBase::Node;
+template<class T> class FoldingSetIterator;
+template<class T> class FoldingSetBucketIterator;
+
+// Definitions of FoldingSetTrait and ContextualFoldingSetTrait functions, which
+// require the definition of FoldingSetNodeID.
+template<typename T>
+inline bool
+DefaultFoldingSetTrait<T>::Equals(T &X, const FoldingSetNodeID &ID,
+                                  unsigned /*IDHash*/,
+                                  FoldingSetNodeID &TempID) {
+  FoldingSetTrait<T>::Profile(X, TempID);
+  return TempID == ID;
+}
+template<typename T>
+inline unsigned
+DefaultFoldingSetTrait<T>::ComputeHash(T &X, FoldingSetNodeID &TempID) {
+  FoldingSetTrait<T>::Profile(X, TempID);
+  return TempID.ComputeHash();
+}
+template<typename T, typename Ctx>
+inline bool
+DefaultContextualFoldingSetTrait<T, Ctx>::Equals(T &X,
+                                                 const FoldingSetNodeID &ID,
+                                                 unsigned /*IDHash*/,
+                                                 FoldingSetNodeID &TempID,
+                                                 Ctx Context) {
+  ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
+  return TempID == ID;
+}
+template<typename T, typename Ctx>
+inline unsigned
+DefaultContextualFoldingSetTrait<T, Ctx>::ComputeHash(T &X,
+                                                      FoldingSetNodeID &TempID,
+                                                      Ctx Context) {
+  ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
+  return TempID.ComputeHash();
+}
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetImpl - An implementation detail that lets us share code between
+/// FoldingSet and ContextualFoldingSet.
+template <class T> class FoldingSetImpl : public FoldingSetBase {
+protected:
+  explicit FoldingSetImpl(unsigned Log2InitSize)
+      : FoldingSetBase(Log2InitSize) {}
+
+  FoldingSetImpl(FoldingSetImpl &&Arg) = default;
+  FoldingSetImpl &operator=(FoldingSetImpl &&RHS) = default;
+  ~FoldingSetImpl() = default;
+
+public:
+  using iterator = FoldingSetIterator<T>;
+
+  iterator begin() { return iterator(Buckets); }
+  iterator end() { return iterator(Buckets+NumBuckets); }
+
+  using const_iterator = FoldingSetIterator<const T>;
+
+  const_iterator begin() const { return const_iterator(Buckets); }
+  const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
+
+  using bucket_iterator = FoldingSetBucketIterator<T>;
+
+  bucket_iterator bucket_begin(unsigned hash) {
+    return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
+  }
+
+  bucket_iterator bucket_end(unsigned hash) {
+    return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
+  }
+
+  /// RemoveNode - Remove a node from the folding set, returning true if one
+  /// was removed or false if the node was not in the folding set.
+  bool RemoveNode(T *N) { return FoldingSetBase::RemoveNode(N); }
+
+  /// GetOrInsertNode - If there is an existing simple Node exactly
+  /// equal to the specified node, return it.  Otherwise, insert 'N' and
+  /// return it instead.
+  T *GetOrInsertNode(T *N) {
+    return static_cast<T *>(FoldingSetBase::GetOrInsertNode(N));
+  }
+
+  /// FindNodeOrInsertPos - Look up the node specified by ID.  If it exists,
+  /// return it.  If not, return the insertion token that will make insertion
+  /// faster.
+  T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+    return static_cast<T *>(FoldingSetBase::FindNodeOrInsertPos(ID, InsertPos));
+  }
+
+  /// InsertNode - Insert the specified node into the folding set, knowing that
+  /// it is not already in the folding set.  InsertPos must be obtained from
+  /// FindNodeOrInsertPos.
+  void InsertNode(T *N, void *InsertPos) {
+    FoldingSetBase::InsertNode(N, InsertPos);
+  }
+
+  /// InsertNode - Insert the specified node into the folding set, knowing that
+  /// it is not already in the folding set.
+  void InsertNode(T *N) {
+    T *Inserted = GetOrInsertNode(N);
+    (void)Inserted;
+    assert(Inserted == N && "Node already inserted!");
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSet - This template class is used to instantiate a specialized
+/// implementation of the folding set to the node class T.  T must be a
+/// subclass of FoldingSetNode and implement a Profile function.
+///
+/// Note that this set type is movable and move-assignable. However, its
+/// moved-from state is not a valid state for anything other than
+/// move-assigning and destroying. This is primarily to enable movable APIs
+/// that incorporate these objects.
+template <class T> class FoldingSet final : public FoldingSetImpl<T> {
+  using Super = FoldingSetImpl<T>;
+  using Node = typename Super::Node;
+
+  /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
+  /// way to convert nodes into a unique specifier.
+  void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override {
+    T *TN = static_cast<T *>(N);
+    FoldingSetTrait<T>::Profile(*TN, ID);
+  }
+
+  /// NodeEquals - Instantiations may optionally provide a way to compare a
+  /// node with a specified ID.
+  bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
+                  FoldingSetNodeID &TempID) const override {
+    T *TN = static_cast<T *>(N);
+    return FoldingSetTrait<T>::Equals(*TN, ID, IDHash, TempID);
+  }
+
+  /// ComputeNodeHash - Instantiations may optionally provide a way to compute a
+  /// hash value directly from a node.
+  unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override {
+    T *TN = static_cast<T *>(N);
+    return FoldingSetTrait<T>::ComputeHash(*TN, TempID);
+  }
+
+public:
+  explicit FoldingSet(unsigned Log2InitSize = 6) : Super(Log2InitSize) {}
+  FoldingSet(FoldingSet &&Arg) = default;
+  FoldingSet &operator=(FoldingSet &&RHS) = default;
+};
+
+//===----------------------------------------------------------------------===//
+/// ContextualFoldingSet - This template class is a further refinement
+/// of FoldingSet which provides a context argument when calling
+/// Profile on its nodes.  Currently, that argument is fixed at
+/// initialization time.
+///
+/// T must be a subclass of FoldingSetNode and implement a Profile
+/// function with signature
+///   void Profile(FoldingSetNodeID &, Ctx);
+template <class T, class Ctx>
+class ContextualFoldingSet final : public FoldingSetImpl<T> {
+  // Unfortunately, this can't derive from FoldingSet<T> because the
+  // construction of the vtable for FoldingSet<T> requires
+  // FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
+  // requires a single-argument T::Profile().
+
+  using Super = FoldingSetImpl<T>;
+  using Node = typename Super::Node;
+
+  Ctx Context;
+
+  /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
+  /// way to convert nodes into a unique specifier.
+  void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override {
+    T *TN = static_cast<T *>(N);
+    ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, Context);
+  }
+
+  bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
+                  FoldingSetNodeID &TempID) const override {
+    T *TN = static_cast<T *>(N);
+    return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, IDHash, TempID,
+                                                     Context);
+  }
+
+  unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override {
+    T *TN = static_cast<T *>(N);
+    return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID, Context);
+  }
+
+public:
+  explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
+      : Super(Log2InitSize), Context(Context) {}
+
+  Ctx getContext() const { return Context; }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetVector - This template class combines a FoldingSet and a vector
+/// to provide the interface of FoldingSet but with deterministic iteration
+/// order based on the insertion order. T must be a subclass of FoldingSetNode
+/// and implement a Profile function.
+template <class T, class VectorT = SmallVector<T*, 8>>
+class FoldingSetVector {
+  FoldingSet<T> Set;
+  VectorT Vector;
+
+public:
+  explicit FoldingSetVector(unsigned Log2InitSize = 6) : Set(Log2InitSize) {}
+
+  using iterator = pointee_iterator<typename VectorT::iterator>;
+
+  iterator begin() { return Vector.begin(); }
+  iterator end()   { return Vector.end(); }
+
+  using const_iterator = pointee_iterator<typename VectorT::const_iterator>;
+
+  const_iterator begin() const { return Vector.begin(); }
+  const_iterator end()   const { return Vector.end(); }
+
+  /// clear - Remove all nodes from the folding set.
+  void clear() { Set.clear(); Vector.clear(); }
+
+  /// FindNodeOrInsertPos - Look up the node specified by ID.  If it exists,
+  /// return it.  If not, return the insertion token that will make insertion
+  /// faster.
+  T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+    return Set.FindNodeOrInsertPos(ID, InsertPos);
+  }
+
+  /// GetOrInsertNode - If there is an existing simple Node exactly
+  /// equal to the specified node, return it.  Otherwise, insert 'N' and
+  /// return it instead.
+  T *GetOrInsertNode(T *N) {
+    T *Result = Set.GetOrInsertNode(N);
+    if (Result == N) Vector.push_back(N);
+    return Result;
+  }
+
+  /// InsertNode - Insert the specified node into the folding set, knowing that
+  /// it is not already in the folding set.  InsertPos must be obtained from
+  /// FindNodeOrInsertPos.
+  void InsertNode(T *N, void *InsertPos) {
+    Set.InsertNode(N, InsertPos);
+    Vector.push_back(N);
+  }
+
+  /// InsertNode - Insert the specified node into the folding set, knowing that
+  /// it is not already in the folding set.
+  void InsertNode(T *N) {
+    Set.InsertNode(N);
+    Vector.push_back(N);
+  }
+
+  /// size - Returns the number of nodes in the folding set.
+  unsigned size() const { return Set.size(); }
+
+  /// empty - Returns true if there are no nodes in the folding set.
+  bool empty() const { return Set.empty(); }
+};
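+
+// Usage mirrors FoldingSet, except that iteration visits nodes in insertion
+// order.  A sketch (A and B are pointers to distinct, caller-owned nodes of a
+// hypothetical MyNode type like the one in the example at the top of this
+// file):
+//
+//   FoldingSetVector<MyNode> V;
+//   V.GetOrInsertNode(A);
+//   V.GetOrInsertNode(B);
+//   for (MyNode &N : V)
+//     ...;  // visits *A then *B, regardless of hash order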
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetIteratorImpl - This is the common iterator support shared by all
+/// folding sets, which knows how to walk the folding set hash table.
+class FoldingSetIteratorImpl {
+protected:
+  FoldingSetNode *NodePtr;
+
+  FoldingSetIteratorImpl(void **Bucket);
+
+  void advance();
+
+public:
+  bool operator==(const FoldingSetIteratorImpl &RHS) const {
+    return NodePtr == RHS.NodePtr;
+  }
+  bool operator!=(const FoldingSetIteratorImpl &RHS) const {
+    return NodePtr != RHS.NodePtr;
+  }
+};
+
+template <class T> class FoldingSetIterator : public FoldingSetIteratorImpl {
+public:
+  explicit FoldingSetIterator(void **Bucket) : FoldingSetIteratorImpl(Bucket) {}
+
+  T &operator*() const {
+    return *static_cast<T*>(NodePtr);
+  }
+
+  T *operator->() const {
+    return static_cast<T*>(NodePtr);
+  }
+
+  inline FoldingSetIterator &operator++() {          // Preincrement
+    advance();
+    return *this;
+  }
+  FoldingSetIterator operator++(int) {        // Postincrement
+    FoldingSetIterator tmp = *this; ++*this; return tmp;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetBucketIteratorImpl - This is the common bucket iterator support
+/// shared by all folding sets, which knows how to walk a particular bucket
+/// of a folding set hash table.
+class FoldingSetBucketIteratorImpl {
+protected:
+  void *Ptr;
+
+  explicit FoldingSetBucketIteratorImpl(void **Bucket);
+
+  FoldingSetBucketIteratorImpl(void **Bucket, bool) : Ptr(Bucket) {}
+
+  void advance() {
+    void *Probe = static_cast<FoldingSetNode*>(Ptr)->getNextInBucket();
+    uintptr_t x = reinterpret_cast<uintptr_t>(Probe) & ~0x1;
+    Ptr = reinterpret_cast<void*>(x);
+  }
+
+public:
+  bool operator==(const FoldingSetBucketIteratorImpl &RHS) const {
+    return Ptr == RHS.Ptr;
+  }
+  bool operator!=(const FoldingSetBucketIteratorImpl &RHS) const {
+    return Ptr != RHS.Ptr;
+  }
+};
+
+template <class T>
+class FoldingSetBucketIterator : public FoldingSetBucketIteratorImpl {
+public:
+  explicit FoldingSetBucketIterator(void **Bucket) :
+    FoldingSetBucketIteratorImpl(Bucket) {}
+
+  FoldingSetBucketIterator(void **Bucket, bool) :
+    FoldingSetBucketIteratorImpl(Bucket, true) {}
+
+  T &operator*() const { return *static_cast<T*>(Ptr); }
+  T *operator->() const { return static_cast<T*>(Ptr); }
+
+  inline FoldingSetBucketIterator &operator++() { // Preincrement
+    advance();
+    return *this;
+  }
+  FoldingSetBucketIterator operator++(int) {      // Postincrement
+    FoldingSetBucketIterator tmp = *this; ++*this; return tmp;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetNodeWrapper - This template class is used to "wrap" arbitrary
+/// types in an enclosing object so that they can be inserted into FoldingSets.
+template <typename T>
+class FoldingSetNodeWrapper : public FoldingSetNode {
+  T data;
+
+public:
+  template <typename... Ts>
+  explicit FoldingSetNodeWrapper(Ts &&... Args)
+      : data(std::forward<Ts>(Args)...) {}
+
+  void Profile(FoldingSetNodeID &ID) { FoldingSetTrait<T>::Profile(data, ID); }
+
+  T &getValue() { return data; }
+  const T &getValue() const { return data; }
+
+  operator T&() { return data; }
+  operator const T&() const { return data; }
+};
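+
+// For example, to unique std::pair<int, int> values (a sketch: the profile
+// comes from the FoldingSetTrait specialization for std::pair at the end of
+// this file, and the wrapper node is assumed to outlive the set):
+//
+//   FoldingSet<FoldingSetNodeWrapper<std::pair<int, int>>> Pairs;
+//   FoldingSetNodeWrapper<std::pair<int, int>> N(1, 2);
+//   auto *Unique = Pairs.GetOrInsertNode(&N);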
+
+//===----------------------------------------------------------------------===//
+/// FastFoldingSetNode - This is a subclass of FoldingSetNode which stores
+/// a FoldingSetNodeID value rather than requiring the node to recompute it
+/// each time it is needed. This trades space for speed (which can be
+/// significant if the ID is long), and it also permits nodes to drop
+/// information that would otherwise only be required for recomputing an ID.
+class FastFoldingSetNode : public FoldingSetNode {
+  FoldingSetNodeID FastID;
+
+protected:
+  explicit FastFoldingSetNode(const FoldingSetNodeID &ID) : FastID(ID) {}
+
+public:
+  void Profile(FoldingSetNodeID &ID) const { ID.AddNodeID(FastID); }
+};
+
+//===----------------------------------------------------------------------===//
+// Partial specializations of FoldingSetTrait.
+
+template<typename T> struct FoldingSetTrait<T*> {
+  static inline void Profile(T *X, FoldingSetNodeID &ID) {
+    ID.AddPointer(X);
+  }
+};
+template <typename T1, typename T2>
+struct FoldingSetTrait<std::pair<T1, T2>> {
+  static inline void Profile(const std::pair<T1, T2> &P,
+                             FoldingSetNodeID &ID) {
+    ID.Add(P.first);
+    ID.Add(P.second);
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_FOLDINGSET_H
diff --git a/linux-x64/clang/include/llvm/ADT/GraphTraits.h b/linux-x64/clang/include/llvm/ADT/GraphTraits.h
new file mode 100644
index 0000000..27c647f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/GraphTraits.h
@@ -0,0 +1,136 @@
+//===- llvm/ADT/GraphTraits.h - Graph traits template -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the little GraphTraits<X> template class that should be
+// specialized by classes that want to be iterable by generic graph iterators.
+//
+// This file also defines the marker class Inverse that is used to iterate over
+// graphs in a graph-defined inverse ordering...
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_GRAPHTRAITS_H
+#define LLVM_ADT_GRAPHTRAITS_H
+
+#include "llvm/ADT/iterator_range.h"
+
+namespace llvm {
+
+// GraphTraits - This class should be specialized by different graph types...
+// which is why the default version is empty.
+//
+template<class GraphType>
+struct GraphTraits {
+  // Elements to provide:
+
+  // typedef NodeRef           - Type of Node token in the graph, which should
+  //                             be cheap to copy.
+  // typedef ChildIteratorType - Type used to iterate over children in graph,
+  //                             dereference to a NodeRef.
+
+  // static NodeRef getEntryNode(const GraphType &)
+  //    Return the entry node of the graph
+
+  // static ChildIteratorType child_begin(NodeRef)
+  // static ChildIteratorType child_end  (NodeRef)
+  //    Return iterators that point to the beginning and ending of the child
+  //    node list for the specified node.
+
+  // typedef  ...iterator nodes_iterator; - dereference to a NodeRef
+  // static nodes_iterator nodes_begin(GraphType *G)
+  // static nodes_iterator nodes_end  (GraphType *G)
+  //    nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+
+  // typedef EdgeRef           - Type of Edge token in the graph, which should
+  //                             be cheap to copy.
+  // typedef ChildEdgeIteratorType - Type used to iterate over children edges in
+  //                             graph, dereference to a EdgeRef.
+
+  // static ChildEdgeIteratorType child_edge_begin(NodeRef)
+  // static ChildEdgeIteratorType child_edge_end(NodeRef)
+  //     Return iterators that point to the beginning and ending of the
+  //     edge list for the given graph node.
+  //
+  // static NodeRef edge_dest(EdgeRef)
+  //     Return the destination node of an edge.
+
+  // static unsigned       size       (GraphType *G)
+  //    Return total number of nodes in the graph
+
+  // If anyone tries to use this class without having an appropriate
+  // specialization, a compile-time error results.  If you get this error, it
+  // is because you need to include the appropriate specialization of
+  // GraphTraits<> for your
+  // graph, or you need to define it for a new graph type. Either that or
+  // your argument to XXX_begin(...) is unknown or needs to have the proper .h
+  // file #include'd.
+  using NodeRef = typename GraphType::UnknownGraphTypeError;
+};
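+
+// A minimal sketch of a specialization for a hypothetical adjacency-list
+// graph (the types below are illustrative, not part of LLVM; assumes
+// <vector> is available):
+//
+//   struct SimpleGraph {
+//     struct Node { std::vector<Node *> Succs; };
+//     Node *Entry;
+//   };
+//
+//   template <> struct GraphTraits<SimpleGraph *> {
+//     using NodeRef = SimpleGraph::Node *;
+//     using ChildIteratorType = std::vector<SimpleGraph::Node *>::iterator;
+//
+//     static NodeRef getEntryNode(SimpleGraph *G) { return G->Entry; }
+//     static ChildIteratorType child_begin(NodeRef N) {
+//       return N->Succs.begin();
+//     }
+//     static ChildIteratorType child_end(NodeRef N) { return N->Succs.end(); }
+//   };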
+
+// Inverse - This class is used as a little marker class to tell the graph
+// iterator to iterate over the graph in a graph defined "Inverse" ordering.
+// Not all graphs define an inverse ordering, and if they do, it depends on
+// the graph exactly what that is.  Here's an example of usage with the
+// df_iterator:
+//
+// idf_iterator<Method*> I = idf_begin(M), E = idf_end(M);
+// for (; I != E; ++I) { ... }
+//
+// Which is equivalent to:
+// df_iterator<Inverse<Method*>> I = idf_begin(M), E = idf_end(M);
+// for (; I != E; ++I) { ... }
+//
+template <class GraphType>
+struct Inverse {
+  const GraphType &Graph;
+
+  inline Inverse(const GraphType &G) : Graph(G) {}
+};
+
+// Provide a partial specialization of GraphTraits so that the inverse of an
+// inverse falls back to the original graph.
+template <class T> struct GraphTraits<Inverse<Inverse<T>>> : GraphTraits<T> {};
+
+// Provide iterator ranges for the graph traits nodes and children
+template <class GraphType>
+iterator_range<typename GraphTraits<GraphType>::nodes_iterator>
+nodes(const GraphType &G) {
+  return make_range(GraphTraits<GraphType>::nodes_begin(G),
+                    GraphTraits<GraphType>::nodes_end(G));
+}
+template <class GraphType>
+iterator_range<typename GraphTraits<Inverse<GraphType>>::nodes_iterator>
+inverse_nodes(const GraphType &G) {
+  return make_range(GraphTraits<Inverse<GraphType>>::nodes_begin(G),
+                    GraphTraits<Inverse<GraphType>>::nodes_end(G));
+}
+
+template <class GraphType>
+iterator_range<typename GraphTraits<GraphType>::ChildIteratorType>
+children(const typename GraphTraits<GraphType>::NodeRef &G) {
+  return make_range(GraphTraits<GraphType>::child_begin(G),
+                    GraphTraits<GraphType>::child_end(G));
+}
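+
+// With a GraphTraits specialization in scope (such as the SimpleGraph sketch
+// above), one can then write, e.g.:
+//
+//   for (SimpleGraph::Node *Succ : children<SimpleGraph *>(N))
+//     ...;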
+
+template <class GraphType>
+iterator_range<typename GraphTraits<Inverse<GraphType>>::ChildIteratorType>
+inverse_children(const typename GraphTraits<GraphType>::NodeRef &G) {
+  return make_range(GraphTraits<Inverse<GraphType>>::child_begin(G),
+                    GraphTraits<Inverse<GraphType>>::child_end(G));
+}
+
+template <class GraphType>
+iterator_range<typename GraphTraits<GraphType>::ChildEdgeIteratorType>
+children_edges(const typename GraphTraits<GraphType>::NodeRef &G) {
+  return make_range(GraphTraits<GraphType>::child_edge_begin(G),
+                    GraphTraits<GraphType>::child_edge_end(G));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_GRAPHTRAITS_H
diff --git a/linux-x64/clang/include/llvm/ADT/Hashing.h b/linux-x64/clang/include/llvm/ADT/Hashing.h
new file mode 100644
index 0000000..c3b5741
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Hashing.h
@@ -0,0 +1,661 @@
+//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the newly proposed standard C++ interfaces for hashing
+// arbitrary data and building hash functions for user-defined types. This
+// interface was originally proposed in N3333[1] and is currently under review
+// for inclusion in a future TR and/or standard.
+//
+// The primary interfaces provided comprise one type and three functions:
+//
+//  -- 'hash_code' class is an opaque type representing the hash code for some
+//     data. It is the intended product of hashing, and can be used to implement
+//     hash tables, checksumming, and other common uses of hashes. It is not an
+//     integer type (although it can be converted to one) because it is risky
+//     to assume much about the internals of a hash_code. In particular, each
+//     execution of the program has a high probability of producing a different
+//     hash_code for a given input. Thus hash_code values are not stable
+//     across runs and should not be saved or persisted; they should only be
+//     used within a single execution, e.g. to build hashing data structures.
+//
+//  -- 'hash_value' is a function designed to be overloaded for each
+//     user-defined type which wishes to be used within a hashing context. It
+//     should be overloaded within the user-defined type's namespace and found
+//     via ADL. Overloads for primitive types are provided by this library.
+//
+//  -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
+//      programmers in easily and intuitively combining a set of data into
+//      a single hash_code for their object. They should only logically be used
+//      within the implementation of a 'hash_value' routine or similar context.
+//
+// Note that 'hash_combine_range' contains very special logic for hashing
+// a contiguous array of integers or pointers. This logic is *extremely* fast:
+// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, it was
+// benchmarked at over 6.5 GiB/s for large keys, and <20 cycles/hash for keys
+// under 32 bytes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_HASHING_H
+#define LLVM_ADT_HASHING_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+/// \brief An opaque object representing a hash code.
+///
+/// This object represents the result of hashing some entity. It is intended to
+/// be used to implement hashtables or other hashing-based data structures.
+/// While it wraps and exposes a numeric value, this value should not be
+/// trusted to be stable or predictable across processes or executions.
+///
+/// In order to obtain the hash_code for an object 'x':
+/// \code
+///   using llvm::hash_value;
+///   llvm::hash_code code = hash_value(x);
+/// \endcode
+class hash_code {
+  size_t value;
+
+public:
+  /// \brief Default construct a hash_code.
+  /// Note that this leaves the value uninitialized.
+  hash_code() = default;
+
+  /// \brief Form a hash code directly from a numerical value.
+  hash_code(size_t value) : value(value) {}
+
+  /// \brief Convert the hash code to its numerical value for use.
+  /*explicit*/ operator size_t() const { return value; }
+
+  friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
+    return lhs.value == rhs.value;
+  }
+  friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
+    return lhs.value != rhs.value;
+  }
+
+  /// \brief Allow a hash_code to be directly run through hash_value.
+  friend size_t hash_value(const hash_code &code) { return code.value; }
+};
+
+/// \brief Compute a hash_code for any integer value.
+///
+/// Note that this function is intended to compute the same hash_code for
+/// a particular value without regard to the pre-promotion type. This is in
+/// contrast to hash_combine which may produce different hash_codes for
+/// differing argument types even if they would implicitly promote to a common
+/// type without changing the value.
+template <typename T>
+typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
+hash_value(T value);
+
+/// \brief Compute a hash_code for a pointer's address.
+///
+/// N.B.: This hashes the *address*. Not the value and not the type.
+template <typename T> hash_code hash_value(const T *ptr);
+
+/// \brief Compute a hash_code for a pair of objects.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg);
+
+/// \brief Compute a hash_code for a standard string.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg);
+
+
+/// \brief Override the execution seed with a fixed value.
+///
+/// This hashing library uses a per-execution seed designed to change on each
+/// run with high probability in order to ensure that the hash codes are not
+/// attackable and to ensure that output which is intended to be stable does
+/// not rely on the particulars of the hash codes produced.
+///
+/// That said, there are use cases where it is important to be able to
+/// reproduce *exactly* a specific behavior. To that end, we provide a function
+/// which will forcibly set the seed to a fixed value. This must be done at the
+/// start of the program, before any hashes are computed. Also, it cannot be
+/// undone. This makes it thread-hostile and very hard to use except
+/// immediately at the start of a simple program designed for reproducible
+/// behavior.
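+///
+/// A sketch of the intended pattern (the seed value is arbitrary):
+/// \code
+///   int main(int argc, char **argv) {
+///     llvm::set_fixed_execution_hash_seed(0x5eed);
+///     // ... all hashing below this point is reproducible across runs ...
+///   }
+/// \endcode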
+void set_fixed_execution_hash_seed(size_t fixed_value);
+
+
+// All of the implementation details of actually computing the various hash
+// code values are held within this namespace. These routines are included in
+// the header file mainly to allow inlining and constant propagation.
+namespace hashing {
+namespace detail {
+
+inline uint64_t fetch64(const char *p) {
+  uint64_t result;
+  memcpy(&result, p, sizeof(result));
+  if (sys::IsBigEndianHost)
+    sys::swapByteOrder(result);
+  return result;
+}
+
+inline uint32_t fetch32(const char *p) {
+  uint32_t result;
+  memcpy(&result, p, sizeof(result));
+  if (sys::IsBigEndianHost)
+    sys::swapByteOrder(result);
+  return result;
+}
+
+/// Some primes between 2^63 and 2^64 for various uses.
+static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
+static const uint64_t k1 = 0xb492b66fbe98f273ULL;
+static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
+static const uint64_t k3 = 0xc949d7c7509e6557ULL;
+
+/// \brief Bitwise right rotate.
+/// Normally this will compile to a single instruction, especially if the
+/// shift is a manifest constant.
+inline uint64_t rotate(uint64_t val, size_t shift) {
+  // Avoid shifting by 64: doing so yields an undefined result.
+  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
+}
+
+inline uint64_t shift_mix(uint64_t val) {
+  return val ^ (val >> 47);
+}
+
+inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
+  // Murmur-inspired hashing.
+  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
+  uint64_t a = (low ^ high) * kMul;
+  a ^= (a >> 47);
+  uint64_t b = (high ^ a) * kMul;
+  b ^= (b >> 47);
+  b *= kMul;
+  return b;
+}
+
+inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
+  uint8_t a = s[0];
+  uint8_t b = s[len >> 1];
+  uint8_t c = s[len - 1];
+  uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
+  uint32_t z = len + (static_cast<uint32_t>(c) << 2);
+  return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
+}
+
+inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t a = fetch32(s);
+  return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
+}
+
+inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t a = fetch64(s);
+  uint64_t b = fetch64(s + len - 8);
+  return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
+}
+
+inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t a = fetch64(s) * k1;
+  uint64_t b = fetch64(s + 8);
+  uint64_t c = fetch64(s + len - 8) * k2;
+  uint64_t d = fetch64(s + len - 16) * k0;
+  return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
+                       a + rotate(b ^ k3, 20) - c + len + seed);
+}
+
+inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t z = fetch64(s + 24);
+  uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
+  uint64_t b = rotate(a + z, 52);
+  uint64_t c = rotate(a, 37);
+  a += fetch64(s + 8);
+  c += rotate(a, 7);
+  a += fetch64(s + 16);
+  uint64_t vf = a + z;
+  uint64_t vs = b + rotate(a, 31) + c;
+  a = fetch64(s + 16) + fetch64(s + len - 32);
+  z = fetch64(s + len - 8);
+  b = rotate(a + z, 52);
+  c = rotate(a, 37);
+  a += fetch64(s + len - 24);
+  c += rotate(a, 7);
+  a += fetch64(s + len - 16);
+  uint64_t wf = a + z;
+  uint64_t ws = b + rotate(a, 31) + c;
+  uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
+  return shift_mix((seed ^ (r * k0)) + vs) * k2;
+}
+
+inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
+  if (length >= 4 && length <= 8)
+    return hash_4to8_bytes(s, length, seed);
+  if (length > 8 && length <= 16)
+    return hash_9to16_bytes(s, length, seed);
+  if (length > 16 && length <= 32)
+    return hash_17to32_bytes(s, length, seed);
+  if (length > 32)
+    return hash_33to64_bytes(s, length, seed);
+  if (length != 0)
+    return hash_1to3_bytes(s, length, seed);
+
+  return k2 ^ seed;
+}
+
+/// \brief The intermediate state used during hashing.
+/// Currently, the algorithm for computing hash codes is based on CityHash and
+/// keeps 56 bytes of arbitrary state.
+struct hash_state {
+  uint64_t h0, h1, h2, h3, h4, h5, h6;
+
+  /// \brief Create a new hash_state structure and initialize it based on the
+  /// seed and the first 64-byte chunk.
+  /// This effectively performs the initial mix.
+  static hash_state create(const char *s, uint64_t seed) {
+    hash_state state = {
+      0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
+      seed * k1, shift_mix(seed), 0 };
+    state.h6 = hash_16_bytes(state.h4, state.h5);
+    state.mix(s);
+    return state;
+  }
+
+  /// \brief Mix 32 bytes from the input sequence into the 16 bytes of 'a'
+  /// and 'b', including whatever is already in 'a' and 'b'.
+  static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
+    a += fetch64(s);
+    uint64_t c = fetch64(s + 24);
+    b = rotate(b + a + c, 21);
+    uint64_t d = a;
+    a += fetch64(s + 8) + fetch64(s + 16);
+    b += rotate(a, 44) + d;
+    a += c;
+  }
+
+  /// \brief Mix in a 64-byte buffer of data.
+  /// We mix all 64 bytes even when the chunk length is smaller, but we
+  /// record the actual length.
+  void mix(const char *s) {
+    h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
+    h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
+    h0 ^= h6;
+    h1 += h3 + fetch64(s + 40);
+    h2 = rotate(h2 + h5, 33) * k1;
+    h3 = h4 * k1;
+    h4 = h0 + h5;
+    mix_32_bytes(s, h3, h4);
+    h5 = h2 + h6;
+    h6 = h1 + fetch64(s + 16);
+    mix_32_bytes(s + 32, h5, h6);
+    std::swap(h2, h0);
+  }
+
+  /// \brief Compute the final 64-bit hash code value based on the current
+  /// state and the length of bytes hashed.
+  uint64_t finalize(size_t length) {
+    return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
+                         hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
+  }
+};
+
+
+/// \brief A global, fixed seed-override variable.
+///
+/// This variable can be set using the \see llvm::set_fixed_execution_hash_seed
+/// function. See that function for details. Do not, under any circumstances,
+/// set or read this variable.
+extern size_t fixed_seed_override;
+
+inline size_t get_execution_seed() {
+  // FIXME: This needs to be a per-execution seed. This is just a placeholder
+  // implementation. Switching to a per-execution seed is likely to flush out
+  // instability bugs and so will happen as its own commit.
+  //
+  // However, if there is a fixed seed override set the first time this is
+  // called, return that instead of the per-execution seed.
+  const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
+  static size_t seed = fixed_seed_override ? fixed_seed_override
+                                           : (size_t)seed_prime;
+  return seed;
+}
+
+
+/// \brief Trait to indicate whether a type's bits can be hashed directly.
+///
+/// A type trait which is true if we want to combine values for hashing by
+/// reading the underlying data. It is false if values of this type must
+/// first be passed to hash_value, and the resulting hash_codes combined.
+//
+// FIXME: We want to replace is_integral_or_enum and is_pointer here with
+// a predicate which asserts that comparing the underlying storage of two
+// values of the type for equality is equivalent to comparing the two values
+// for equality. For all the platforms we care about, this holds for integers
+// and pointers, but there are platforms where it doesn't and we would like to
+// support user-defined types which happen to satisfy this property.
+template <typename T> struct is_hashable_data
+  : std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
+                                   std::is_pointer<T>::value) &&
+                                  64 % sizeof(T) == 0)> {};
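+
+// For example, 'int', 'char', enums, and raw pointers satisfy this trait and
+// have their bytes combined directly, while a type such as 'std::string' does
+// not and is first routed through 'hash_value'.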
+
+// Special case std::pair to detect when both types are viable and when there
+// is no alignment-derived padding in the pair. This is a bit of a lie because
+// std::pair isn't truly POD, but it's close enough in all reasonable
+// implementations for our use case of hashing the underlying data.
+template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
+  : std::integral_constant<bool, (is_hashable_data<T>::value &&
+                                  is_hashable_data<U>::value &&
+                                  (sizeof(T) + sizeof(U)) ==
+                                   sizeof(std::pair<T, U>))> {};
+
+/// \brief Helper to get the hashable data representation for a type.
+/// This variant is enabled when the type itself can be used.
+template <typename T>
+typename std::enable_if<is_hashable_data<T>::value, T>::type
+get_hashable_data(const T &value) {
+  return value;
+}
+/// \brief Helper to get the hashable data representation for a type.
+/// This variant is enabled when we must first call hash_value and use the
+/// result as our data.
+template <typename T>
+typename std::enable_if<!is_hashable_data<T>::value, size_t>::type
+get_hashable_data(const T &value) {
+  using ::llvm::hash_value;
+  return hash_value(value);
+}
+
+/// \brief Helper to store data from a value into a buffer and advance the
+/// pointer into that buffer.
+///
+/// This routine first checks whether there is enough space in the provided
+/// buffer, and if not immediately returns false. If there is space, it
+/// copies the underlying bytes of value into the buffer, advances the
+/// buffer_ptr past the copied bytes, and returns true.
+template <typename T>
+bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
+                       size_t offset = 0) {
+  size_t store_size = sizeof(value) - offset;
+  if (buffer_ptr + store_size > buffer_end)
+    return false;
+  const char *value_data = reinterpret_cast<const char *>(&value);
+  memcpy(buffer_ptr, value_data + offset, store_size);
+  buffer_ptr += store_size;
+  return true;
+}
+
+/// \brief Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is
+/// integral. Rather than computing a hash_code for each object and then
+/// combining them, this (as an optimization) directly combines the integers.
+template <typename InputIteratorT>
+hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
+  const size_t seed = get_execution_seed();
+  char buffer[64], *buffer_ptr = buffer;
+  char *const buffer_end = std::end(buffer);
+  while (first != last && store_and_advance(buffer_ptr, buffer_end,
+                                            get_hashable_data(*first)))
+    ++first;
+  if (first == last)
+    return hash_short(buffer, buffer_ptr - buffer, seed);
+  assert(buffer_ptr == buffer_end);
+
+  hash_state state = hash_state::create(buffer, seed);
+  size_t length = 64;
+  while (first != last) {
+    // Fill up the buffer. We don't clear it, which re-mixes the last round
+    // when only a partial 64-byte chunk is left.
+    buffer_ptr = buffer;
+    while (first != last && store_and_advance(buffer_ptr, buffer_end,
+                                              get_hashable_data(*first)))
+      ++first;
+
+    // Rotate the buffer if we did a partial fill in order to simulate doing
+    // a mix of the last 64 bytes. That is how the algorithm works when we
+    // have a contiguous byte sequence, and we want to emulate that here.
+    std::rotate(buffer, buffer_ptr, buffer_end);
+
+    // Mix this chunk into the current state.
+    state.mix(buffer);
+    length += buffer_ptr - buffer;
+  }
+
+  return state.finalize(length);
+}
+
+/// \brief Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is integral
+/// and when the input iterator is actually a pointer. Rather than computing
+/// a hash_code for each object and then combining them, this (as an
+/// optimization) directly combines the integers. Also, because the integers
+/// are stored in contiguous memory, this routine avoids copying each value
+/// and directly reads from the underlying memory.
+template <typename ValueT>
+typename std::enable_if<is_hashable_data<ValueT>::value, hash_code>::type
+hash_combine_range_impl(ValueT *first, ValueT *last) {
+  const size_t seed = get_execution_seed();
+  const char *s_begin = reinterpret_cast<const char *>(first);
+  const char *s_end = reinterpret_cast<const char *>(last);
+  const size_t length = std::distance(s_begin, s_end);
+  if (length <= 64)
+    return hash_short(s_begin, length, seed);
+
+  const char *s_aligned_end = s_begin + (length & ~63);
+  hash_state state = hash_state::create(s_begin, seed);
+  s_begin += 64;
+  while (s_begin != s_aligned_end) {
+    state.mix(s_begin);
+    s_begin += 64;
+  }
+  if (length & 63)
+    state.mix(s_end - 64);
+
+  return state.finalize(length);
+}
+
+} // namespace detail
+} // namespace hashing
+
+
+/// \brief Compute a hash_code for a sequence of values.
+///
+/// This hashes a sequence of values. It produces the same hash_code as
+/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
+/// and is significantly faster given pointers and types which can be hashed as
+/// a sequence of bytes.
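+///
+/// For example (a sketch):
+/// \code
+///   std::vector<int> Data = {1, 2, 3, 4};
+///   llvm::hash_code H =
+///       llvm::hash_combine_range(Data.data(), Data.data() + Data.size());
+/// \endcode
+/// Passing raw pointers to contiguous integers, as here, selects the
+/// specialized overload that hashes the underlying bytes directly.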
+template <typename InputIteratorT>
+hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
+  return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
+}
+
+
+// Implementation details for hash_combine.
+namespace hashing {
+namespace detail {
+
+/// \brief Helper class to manage the recursive combining of hash_combine
+/// arguments.
+///
+/// This class exists to manage the state and various calls involved in the
+/// recursive combining of arguments used in hash_combine. It is particularly
+/// useful for minimizing the code in the recursive calls to ease the pain
+/// caused by a lack of variadic functions.
+struct hash_combine_recursive_helper {
+  char buffer[64];
+  hash_state state;
+  const size_t seed;
+
+public:
+  /// \brief Construct a recursive hash combining helper.
+  ///
+  /// This sets up the state for a recursive hash combine, including getting
+  /// the seed and buffer setup.
+  hash_combine_recursive_helper()
+    : seed(get_execution_seed()) {}
+
+  /// \brief Combine one chunk of data into the current in-flight hash.
+  ///
+  /// This merges one chunk of data into the hash. First it tries to buffer
+  /// the data. If the buffer is full, it hashes the buffer into its
+  /// hash_state, empties it, and then merges the new chunk in. This also
+  /// handles cases where the data straddles the end of the buffer.
+  template <typename T>
+  char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end,
+                     T data) {
+    if (!store_and_advance(buffer_ptr, buffer_end, data)) {
+      // Check for skew which prevents the buffer from being packed, and do
+      // a partial store into the buffer to fill it. This is only a concern
+      // with the variadic combine because that formation can have varying
+      // argument types.
+      size_t partial_store_size = buffer_end - buffer_ptr;
+      memcpy(buffer_ptr, &data, partial_store_size);
+
+      // If the store fails, our buffer is full and ready to hash. We have to
+      // either initialize the hash state (on the first full buffer) or mix
+      // this buffer into the existing hash state. Length tracks the *hashed*
+      // length, not the buffered length.
+      if (length == 0) {
+        state = hash_state::create(buffer, seed);
+        length = 64;
+      } else {
+        // Mix this chunk into the current state and bump length up by 64.
+        state.mix(buffer);
+        length += 64;
+      }
+      // Reset the buffer_ptr to the head of the buffer for the next chunk of
+      // data.
+      buffer_ptr = buffer;
+
+      // Try again to store into the buffer -- this cannot fail as we only
+      // store types smaller than the buffer.
+      if (!store_and_advance(buffer_ptr, buffer_end, data,
+                             partial_store_size))
+        abort();
+    }
+    return buffer_ptr;
+  }
+
+  /// \brief Recursive, variadic combining method.
+  ///
+  /// This function recurses through each argument, combining that argument
+  /// into a single hash.
+  template <typename T, typename ...Ts>
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+                    const T &arg, const Ts &...args) {
+    buffer_ptr = combine_data(length, buffer_ptr, buffer_end,
+                              get_hashable_data(arg));
+
+    // Recurse to the next argument.
+    return combine(length, buffer_ptr, buffer_end, args...);
+  }
+
+  /// \brief Base case for recursive, variadic combining.
+  ///
+  /// The base case when combining arguments recursively is reached when all
+  /// arguments have been handled. It flushes the remaining buffer and
+  /// constructs a hash_code.
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
+    // Check whether the entire set of values fits in the buffer. If so, we'll
+    // use the optimized short hashing routine and skip state entirely.
+    if (length == 0)
+      return hash_short(buffer, buffer_ptr - buffer, seed);
+
+    // Mix the final buffer, rotating it if we did a partial fill in order to
+    // simulate doing a mix of the last 64 bytes. That is how the algorithm
+    // works when we have a contiguous byte sequence, and we want to emulate
+    // that here.
+    std::rotate(buffer, buffer_ptr, buffer_end);
+
+    // Mix this chunk into the current state.
+    state.mix(buffer);
+    length += buffer_ptr - buffer;
+
+    return state.finalize(length);
+  }
+};
+
+} // namespace detail
+} // namespace hashing
+
+/// \brief Combine values into a single hash_code.
+///
+/// This routine accepts a varying number of arguments of any type. It will
+/// attempt to combine them into a single hash_code. For user-defined types it
+/// attempts to call a \see hash_value overload (via ADL) for the type. For
+/// integer and pointer types it directly combines their data into the
+/// resulting hash_code.
+///
+/// The result is suitable for returning from a user's hash_value
+/// *implementation* for their user-defined type. Consumers of a type should
+/// *not* call this routine; they should instead call 'hash_value'.
+template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
+  // Recursively hash each argument using a helper class.
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
+}
+
+// Implementation details for implementations of hash_value overloads provided
+// here.
+namespace hashing {
+namespace detail {
+
+/// \brief Helper to hash the value of a single integer.
+///
+/// Overloads for smaller integer types are not provided to ensure consistent
+/// behavior in the presence of integral promotions. Essentially,
+/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
+inline hash_code hash_integer_value(uint64_t value) {
+  // Similar to hash_4to8_bytes but using a seed instead of length.
+  const uint64_t seed = get_execution_seed();
+  const char *s = reinterpret_cast<const char *>(&value);
+  const uint64_t a = fetch32(s);
+  return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
+}
+
+} // namespace detail
+} // namespace hashing
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T>
+typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
+hash_value(T value) {
+  return ::llvm::hashing::detail::hash_integer_value(
+      static_cast<uint64_t>(value));
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T> hash_code hash_value(const T *ptr) {
+  return ::llvm::hashing::detail::hash_integer_value(
+    reinterpret_cast<uintptr_t>(ptr));
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg) {
+  return hash_combine(arg.first, arg.second);
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg) {
+  return hash_combine_range(arg.begin(), arg.end());
+}
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/ImmutableList.h b/linux-x64/clang/include/llvm/ADT/ImmutableList.h
new file mode 100644
index 0000000..60d63e0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ImmutableList.h
@@ -0,0 +1,235 @@
+//==--- ImmutableList.h - Immutable (functional) list interface --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ImmutableList class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLELIST_H
+#define LLVM_ADT_IMMUTABLELIST_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstdint>
+#include <new>
+
+namespace llvm {
+
+template <typename T> class ImmutableListFactory;
+
+template <typename T>
+class ImmutableListImpl : public FoldingSetNode {
+  friend class ImmutableListFactory<T>;
+
+  T Head;
+  const ImmutableListImpl* Tail;
+
+  ImmutableListImpl(const T& head, const ImmutableListImpl* tail = nullptr)
+    : Head(head), Tail(tail) {}
+
+public:
+  ImmutableListImpl(const ImmutableListImpl &) = delete;
+  ImmutableListImpl &operator=(const ImmutableListImpl &) = delete;
+
+  const T& getHead() const { return Head; }
+  const ImmutableListImpl* getTail() const { return Tail; }
+
+  static inline void Profile(FoldingSetNodeID& ID, const T& H,
+                             const ImmutableListImpl* L){
+    ID.AddPointer(L);
+    ID.Add(H);
+  }
+
+  void Profile(FoldingSetNodeID& ID) {
+    Profile(ID, Head, Tail);
+  }
+};
+
+/// ImmutableList - This class represents an immutable (functional) list.
+///  It is implemented as a smart pointer (wraps ImmutableListImpl), so it
+///  is intended to always be copied by value as if it were a pointer.
+///  This interface matches ImmutableSet and ImmutableMap.  ImmutableList
+///  objects should almost never be created directly, and instead should
+///  be created by ImmutableListFactory objects that manage the lifetime
+///  of a group of lists.  When the factory object is reclaimed, all lists
+///  created by that factory are released as well.
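+///
+///  A sketch of typical use:
+///  \code
+///    ImmutableList<int>::Factory F;
+///    ImmutableList<int> L = F.getEmptyList();
+///    L = F.add(3, L);             // [3]
+///    L = F.add(2, L);             // [2, 3]
+///    L = F.add(1, L);             // [1, 2, 3]
+///    bool HasTwo = L.contains(2); // true
+///  \endcode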
+template <typename T>
+class ImmutableList {
+public:
+  using value_type = T;
+  using Factory = ImmutableListFactory<T>;
+
+private:
+  const ImmutableListImpl<T>* X;
+
+public:
+  // This constructor should normally only be called by ImmutableListFactory<T>.
+  // There may be cases, however, when one needs to extract the internal pointer
+  // and reconstruct a list object from that pointer.
+  ImmutableList(const ImmutableListImpl<T>* x = nullptr) : X(x) {}
+
+  const ImmutableListImpl<T>* getInternalPointer() const {
+    return X;
+  }
+
+  class iterator {
+    const ImmutableListImpl<T>* L = nullptr;
+
+  public:
+    iterator() = default;
+    iterator(ImmutableList l) : L(l.getInternalPointer()) {}
+
+    iterator& operator++() { L = L->getTail(); return *this; }
+    bool operator==(const iterator& I) const { return L == I.L; }
+    bool operator!=(const iterator& I) const { return L != I.L; }
+    const value_type& operator*() const { return L->getHead(); }
+
+    ImmutableList getList() const { return L; }
+  };
+
+  /// begin - Returns an iterator referring to the head of the list, or
+  ///  an iterator denoting the end of the list if the list is empty.
+  iterator begin() const { return iterator(X); }
+
+  /// end - Returns an iterator denoting the end of the list.  This iterator
+  ///  does not refer to a valid list element.
+  iterator end() const { return iterator(); }
+
+  /// isEmpty - Returns true if the list is empty.
+  bool isEmpty() const { return !X; }
+
+  bool contains(const T& V) const {
+    for (iterator I = begin(), E = end(); I != E; ++I) {
+      if (*I == V)
+        return true;
+    }
+    return false;
+  }
+
+  /// isEqual - Returns true if two lists are equal.  Because all lists created
+  ///  from the same ImmutableListFactory are uniqued, this has O(1) complexity
+///  because the contents of the list do not need to be compared.  Note
+  ///  that you should only compare two lists created from the same
+  ///  ImmutableListFactory.
+  bool isEqual(const ImmutableList& L) const { return X == L.X; }
+
+  bool operator==(const ImmutableList& L) const { return isEqual(L); }
+
+  /// getHead - Returns the head of the list.
+  const T& getHead() {
+    assert(!isEmpty() && "Cannot get the head of an empty list.");
+    return X->getHead();
+  }
+
+  /// getTail - Returns the tail of the list, which is another (possibly empty)
+  ///  ImmutableList.
+  ImmutableList getTail() {
+    return X ? X->getTail() : nullptr;
+  }
+
+  void Profile(FoldingSetNodeID& ID) const {
+    ID.AddPointer(X);
+  }
+};
+
+template <typename T>
+class ImmutableListFactory {
+  using ListTy = ImmutableListImpl<T>;
+  using CacheTy = FoldingSet<ListTy>;
+
+  CacheTy Cache;
+  uintptr_t Allocator;
+
+  bool ownsAllocator() const {
+    return (Allocator & 0x1) == 0;
+  }
+
+  BumpPtrAllocator& getAllocator() const {
+    return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
+  }
+
+public:
+  ImmutableListFactory()
+    : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}
+
+  ImmutableListFactory(BumpPtrAllocator& Alloc)
+  : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}
+
+  ~ImmutableListFactory() {
+    if (ownsAllocator()) delete &getAllocator();
+  }
+
+  ImmutableList<T> concat(const T& Head, ImmutableList<T> Tail) {
+    // Profile the new list to see if it already exists in our cache.
+    FoldingSetNodeID ID;
+    void* InsertPos;
+
+    const ListTy* TailImpl = Tail.getInternalPointer();
+    ListTy::Profile(ID, Head, TailImpl);
+    ListTy* L = Cache.FindNodeOrInsertPos(ID, InsertPos);
+
+    if (!L) {
+      // The list does not exist in our cache.  Create it.
+      BumpPtrAllocator& A = getAllocator();
+      L = (ListTy*) A.Allocate<ListTy>();
+      new (L) ListTy(Head, TailImpl);
+
+      // Insert the new list into the cache.
+      Cache.InsertNode(L, InsertPos);
+    }
+
+    return L;
+  }
+
+  ImmutableList<T> add(const T& D, ImmutableList<T> L) {
+    return concat(D, L);
+  }
+
+  ImmutableList<T> getEmptyList() const {
+    return ImmutableList<T>(nullptr);
+  }
+
+  ImmutableList<T> create(const T& X) {
+    return concat(X, getEmptyList());
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Partially-specialized Traits.
+//===----------------------------------------------------------------------===//
+
+template<typename T> struct DenseMapInfo;
+template<typename T> struct DenseMapInfo<ImmutableList<T>> {
+  static inline ImmutableList<T> getEmptyKey() {
+    return reinterpret_cast<ImmutableListImpl<T>*>(-1);
+  }
+
+  static inline ImmutableList<T> getTombstoneKey() {
+    return reinterpret_cast<ImmutableListImpl<T>*>(-2);
+  }
+
+  static unsigned getHashValue(ImmutableList<T> X) {
+    uintptr_t PtrVal = reinterpret_cast<uintptr_t>(X.getInternalPointer());
+    return (unsigned(PtrVal) >> 4) ^ (unsigned(PtrVal) >> 9);
+  }
+
+  static bool isEqual(ImmutableList<T> X1, ImmutableList<T> X2) {
+    return X1 == X2;
+  }
+};
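+
+// With the traits above, an ImmutableList can be used directly as a DenseMap
+// key, e.g. (a sketch): llvm::DenseMap<ImmutableList<int>, unsigned> Counts;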
+
+template <typename T> struct isPodLike;
+template <typename T>
+struct isPodLike<ImmutableList<T>> { static const bool value = true; };
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IMMUTABLELIST_H
diff --git a/linux-x64/clang/include/llvm/ADT/ImmutableMap.h b/linux-x64/clang/include/llvm/ADT/ImmutableMap.h
new file mode 100644
index 0000000..10d1e1f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ImmutableMap.h
@@ -0,0 +1,414 @@
+//===--- ImmutableMap.h - Immutable (functional) map interface --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ImmutableMap class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLEMAP_H
+#define LLVM_ADT_IMMUTABLEMAP_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/Support/Allocator.h"
+#include <utility>
+
+namespace llvm {
+
+/// ImutKeyValueInfo - Traits class used by ImmutableMap.  While both the first
+/// and second elements in a pair are used to generate profile information,
+/// only the first element (the key) is used by isEqual and isLess.
+template <typename T, typename S>
+struct ImutKeyValueInfo {
+  using value_type = const std::pair<T,S>;
+  using value_type_ref = const value_type&;
+  using key_type = const T;
+  using key_type_ref = const T&;
+  using data_type = const S;
+  using data_type_ref = const S&;
+
+  static inline key_type_ref KeyOfValue(value_type_ref V) {
+    return V.first;
+  }
+
+  static inline data_type_ref DataOfValue(value_type_ref V) {
+    return V.second;
+  }
+
+  static inline bool isEqual(key_type_ref L, key_type_ref R) {
+    return ImutContainerInfo<T>::isEqual(L,R);
+  }
+  static inline bool isLess(key_type_ref L, key_type_ref R) {
+    return ImutContainerInfo<T>::isLess(L,R);
+  }
+
+  static inline bool isDataEqual(data_type_ref L, data_type_ref R) {
+    return ImutContainerInfo<S>::isEqual(L,R);
+  }
+
+  static inline void Profile(FoldingSetNodeID& ID, value_type_ref V) {
+    ImutContainerInfo<T>::Profile(ID, V.first);
+    ImutContainerInfo<S>::Profile(ID, V.second);
+  }
+};
+
+template <typename KeyT, typename ValT,
+          typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
+class ImmutableMap {
+public:
+  using value_type = typename ValInfo::value_type;
+  using value_type_ref = typename ValInfo::value_type_ref;
+  using key_type = typename ValInfo::key_type;
+  using key_type_ref = typename ValInfo::key_type_ref;
+  using data_type = typename ValInfo::data_type;
+  using data_type_ref = typename ValInfo::data_type_ref;
+  using TreeTy = ImutAVLTree<ValInfo>;
+
+protected:
+  TreeTy* Root;
+
+public:
+  /// Constructs a map from a pointer to a tree root.  In general one
+  /// should use a Factory object to create maps instead of directly
+  /// invoking the constructor, but there are cases where making this
+  /// constructor public is useful.
+  explicit ImmutableMap(const TreeTy* R) : Root(const_cast<TreeTy*>(R)) {
+    if (Root) { Root->retain(); }
+  }
+
+  ImmutableMap(const ImmutableMap &X) : Root(X.Root) {
+    if (Root) { Root->retain(); }
+  }
+
+  ~ImmutableMap() {
+    if (Root) { Root->release(); }
+  }
+
+  ImmutableMap &operator=(const ImmutableMap &X) {
+    if (Root != X.Root) {
+      if (X.Root) { X.Root->retain(); }
+      if (Root) { Root->release(); }
+      Root = X.Root;
+    }
+    return *this;
+  }
+
+  class Factory {
+    typename TreeTy::Factory F;
+    const bool Canonicalize;
+
+  public:
+    Factory(bool canonicalize = true) : Canonicalize(canonicalize) {}
+
+    Factory(BumpPtrAllocator &Alloc, bool canonicalize = true)
+        : F(Alloc), Canonicalize(canonicalize) {}
+
+    Factory(const Factory &) = delete;
+    Factory &operator=(const Factory &) = delete;
+
+    ImmutableMap getEmptyMap() { return ImmutableMap(F.getEmptyTree()); }
+
+    ImmutableMap add(ImmutableMap Old, key_type_ref K, data_type_ref D) {
+      TreeTy *T = F.add(Old.Root, std::pair<key_type,data_type>(K,D));
+      return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
+    }
+
+    ImmutableMap remove(ImmutableMap Old, key_type_ref K) {
+      TreeTy *T = F.remove(Old.Root,K);
+      return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
+    }
+
+    typename TreeTy::Factory *getTreeFactory() const {
+      return const_cast<typename TreeTy::Factory *>(&F);
+    }
+  };
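+
+  // A sketch of typical use: all updates go through a Factory and yield new
+  // maps rather than mutating in place.
+  //
+  //   ImmutableMap<int, int>::Factory F;
+  //   ImmutableMap<int, int> M = F.getEmptyMap();
+  //   M = F.add(M, /*K=*/1, /*D=*/10);
+  //   const int *D = M.lookup(1);   // *D == 10
+  //   M = F.remove(M, 1);           // lookup(1) now returns nullptr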
+
+  bool contains(key_type_ref K) const {
+    return Root ? Root->contains(K) : false;
+  }
+
+  bool operator==(const ImmutableMap &RHS) const {
+    return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+  }
+
+  bool operator!=(const ImmutableMap &RHS) const {
+    return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+  }
+
+  TreeTy *getRoot() const {
+    if (Root) { Root->retain(); }
+    return Root;
+  }
+
+  TreeTy *getRootWithoutRetain() const { return Root; }
+
+  void manualRetain() {
+    if (Root) Root->retain();
+  }
+
+  void manualRelease() {
+    if (Root) Root->release();
+  }
+
+  bool isEmpty() const { return !Root; }
+
+  //===--------------------------------------------------===//
+  // Foreach - A limited form of map iteration.
+  //===--------------------------------------------------===//
+
+private:
+  template <typename Callback>
+  struct CBWrapper {
+    Callback C;
+
+    void operator()(value_type_ref V) { C(V.first,V.second); }
+  };
+
+  template <typename Callback>
+  struct CBWrapperRef {
+    Callback &C;
+
+    CBWrapperRef(Callback& c) : C(c) {}
+
+    void operator()(value_type_ref V) { C(V.first,V.second); }
+  };
+
+public:
+  template <typename Callback>
+  void foreach(Callback& C) {
+    if (Root) {
+      CBWrapperRef<Callback> CB(C);
+      Root->foreach(CB);
+    }
+  }
+
+  template <typename Callback>
+  void foreach() {
+    if (Root) {
+      CBWrapper<Callback> CB;
+      Root->foreach(CB);
+    }
+  }
+
+  //===--------------------------------------------------===//
+  // For testing.
+  //===--------------------------------------------------===//
+
+  void verify() const { if (Root) Root->verify(); }
+
+  //===--------------------------------------------------===//
+  // Iterators.
+  //===--------------------------------------------------===//
+
+  class iterator : public ImutAVLValueIterator<ImmutableMap> {
+    friend class ImmutableMap;
+
+    iterator() = default;
+    explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
+
+  public:
+    key_type_ref getKey() const { return (*this)->first; }
+    data_type_ref getData() const { return (*this)->second; }
+  };
+
+  iterator begin() const { return iterator(Root); }
+  iterator end() const { return iterator(); }
+
+  data_type* lookup(key_type_ref K) const {
+    if (Root) {
+      TreeTy* T = Root->find(K);
+      if (T) return &T->getValue().second;
+    }
+
+    return nullptr;
+  }
+
+  /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
+  ///  which the key is the highest in the ordering of keys in the map.  This
+  ///  method returns NULL if the map is empty.
+  value_type* getMaxElement() const {
+    return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
+  }
+
+  //===--------------------------------------------------===//
+  // Utility methods.
+  //===--------------------------------------------------===//
+
+  unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+  static inline void Profile(FoldingSetNodeID& ID, const ImmutableMap& M) {
+    ID.AddPointer(M.Root);
+  }
+
+  inline void Profile(FoldingSetNodeID& ID) const {
+    return Profile(ID,*this);
+  }
+};
+
+// NOTE: This will possibly become the new implementation of ImmutableMap
+// some day.
+template <typename KeyT, typename ValT,
+          typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
+class ImmutableMapRef {
+public:
+  using value_type = typename ValInfo::value_type;
+  using value_type_ref = typename ValInfo::value_type_ref;
+  using key_type = typename ValInfo::key_type;
+  using key_type_ref = typename ValInfo::key_type_ref;
+  using data_type = typename ValInfo::data_type;
+  using data_type_ref = typename ValInfo::data_type_ref;
+  using TreeTy = ImutAVLTree<ValInfo>;
+  using FactoryTy = typename TreeTy::Factory;
+
+protected:
+  TreeTy *Root;
+  FactoryTy *Factory;
+
+public:
+  /// Constructs a map from a pointer to a tree root.  In general one
+  /// should use a Factory object to create maps instead of directly
+  /// invoking the constructor, but there are cases where making this
+  /// constructor public is useful.
+  explicit ImmutableMapRef(const TreeTy *R, FactoryTy *F)
+      : Root(const_cast<TreeTy *>(R)), Factory(F) {
+    if (Root) {
+      Root->retain();
+    }
+  }
+
+  explicit ImmutableMapRef(const ImmutableMap<KeyT, ValT> &X,
+                           typename ImmutableMap<KeyT, ValT>::Factory &F)
+    : Root(X.getRootWithoutRetain()),
+      Factory(F.getTreeFactory()) {
+    if (Root) { Root->retain(); }
+  }
+
+  ImmutableMapRef(const ImmutableMapRef &X) : Root(X.Root), Factory(X.Factory) {
+    if (Root) {
+      Root->retain();
+    }
+  }
+
+  ~ImmutableMapRef() {
+    if (Root)
+      Root->release();
+  }
+
+  ImmutableMapRef &operator=(const ImmutableMapRef &X) {
+    if (Root != X.Root) {
+      if (X.Root)
+        X.Root->retain();
+
+      if (Root)
+        Root->release();
+
+      Root = X.Root;
+      Factory = X.Factory;
+    }
+    return *this;
+  }
+
+  static inline ImmutableMapRef getEmptyMap(FactoryTy *F) {
+    return ImmutableMapRef(nullptr, F);
+  }
+
+  void manualRetain() {
+    if (Root) Root->retain();
+  }
+
+  void manualRelease() {
+    if (Root) Root->release();
+  }
+
+  ImmutableMapRef add(key_type_ref K, data_type_ref D) const {
+    TreeTy *NewT = Factory->add(Root, std::pair<key_type, data_type>(K, D));
+    return ImmutableMapRef(NewT, Factory);
+  }
+
+  ImmutableMapRef remove(key_type_ref K) const {
+    TreeTy *NewT = Factory->remove(Root, K);
+    return ImmutableMapRef(NewT, Factory);
+  }
+
+  bool contains(key_type_ref K) const {
+    return Root ? Root->contains(K) : false;
+  }
+
+  ImmutableMap<KeyT, ValT> asImmutableMap() const {
+    return ImmutableMap<KeyT, ValT>(Factory->getCanonicalTree(Root));
+  }
+
+  bool operator==(const ImmutableMapRef &RHS) const {
+    return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+  }
+
+  bool operator!=(const ImmutableMapRef &RHS) const {
+    return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+  }
+
+  bool isEmpty() const { return !Root; }
+
+  //===--------------------------------------------------===//
+  // For testing.
+  //===--------------------------------------------------===//
+
+  void verify() const {
+    if (Root)
+      Root->verify();
+  }
+
+  //===--------------------------------------------------===//
+  // Iterators.
+  //===--------------------------------------------------===//
+
+  class iterator : public ImutAVLValueIterator<ImmutableMapRef> {
+    friend class ImmutableMapRef;
+
+    iterator() = default;
+    explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
+
+  public:
+    key_type_ref getKey() const { return (*this)->first; }
+    data_type_ref getData() const { return (*this)->second; }
+  };
+
+  iterator begin() const { return iterator(Root); }
+  iterator end() const { return iterator(); }
+
+  data_type *lookup(key_type_ref K) const {
+    if (Root) {
+      TreeTy* T = Root->find(K);
+      if (T) return &T->getValue().second;
+    }
+
+    return nullptr;
+  }
+
+  /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
+  ///  which the key is the highest in the ordering of keys in the map.  This
+  ///  method returns NULL if the map is empty.
+  value_type* getMaxElement() const {
+    return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
+  }
+
+  //===--------------------------------------------------===//
+  // Utility methods.
+  //===--------------------------------------------------===//
+
+  unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+  static inline void Profile(FoldingSetNodeID &ID, const ImmutableMapRef &M) {
+    ID.AddPointer(M.Root);
+  }
+
+  inline void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IMMUTABLEMAP_H
diff --git a/linux-x64/clang/include/llvm/ADT/ImmutableSet.h b/linux-x64/clang/include/llvm/ADT/ImmutableSet.h
new file mode 100644
index 0000000..9d580c5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ImmutableSet.h
@@ -0,0 +1,1224 @@
+//===--- ImmutableSet.h - Immutable (functional) set interface --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ImutAVLTree and ImmutableSet classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLESET_H
+#define LLVM_ADT_IMMUTABLESET_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <iterator>
+#include <new>
+#include <vector>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Definition.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo> class ImutAVLFactory;
+template <typename ImutInfo> class ImutIntervalAVLFactory;
+template <typename ImutInfo> class ImutAVLTreeInOrderIterator;
+template <typename ImutInfo> class ImutAVLTreeGenericIterator;
+
+template <typename ImutInfo >
+class ImutAVLTree {
+public:
+  using key_type_ref = typename ImutInfo::key_type_ref;
+  using value_type = typename ImutInfo::value_type;
+  using value_type_ref = typename ImutInfo::value_type_ref;
+  using Factory = ImutAVLFactory<ImutInfo>;
+  using iterator = ImutAVLTreeInOrderIterator<ImutInfo>;
+
+  friend class ImutAVLFactory<ImutInfo>;
+  friend class ImutIntervalAVLFactory<ImutInfo>;
+  friend class ImutAVLTreeGenericIterator<ImutInfo>;
+
+  //===----------------------------------------------------===//
+  // Public Interface.
+  //===----------------------------------------------------===//
+
+  /// Return a pointer to the left subtree.  This value
+  ///  is NULL if there is no left subtree.
+  ImutAVLTree *getLeft() const { return left; }
+
+  /// Return a pointer to the right subtree.  This value is
+  ///  NULL if there is no right subtree.
+  ImutAVLTree *getRight() const { return right; }
+
+  /// getHeight - Returns the height of the tree.  A tree with no subtrees
+  ///  has a height of 1.
+  unsigned getHeight() const { return height; }
+
+  /// getValue - Returns the data value associated with the tree node.
+  const value_type& getValue() const { return value; }
+
+  /// find - Finds the subtree associated with the specified key value.
+  ///  This method returns NULL if no matching subtree is found.
+  ImutAVLTree* find(key_type_ref K) {
+    ImutAVLTree *T = this;
+    while (T) {
+      key_type_ref CurrentKey = ImutInfo::KeyOfValue(T->getValue());
+      if (ImutInfo::isEqual(K,CurrentKey))
+        return T;
+      else if (ImutInfo::isLess(K,CurrentKey))
+        T = T->getLeft();
+      else
+        T = T->getRight();
+    }
+    return nullptr;
+  }
+
+  /// getMaxElement - Find the subtree associated with the highest-ranked
+  ///  key value.
+  ImutAVLTree* getMaxElement() {
+    ImutAVLTree *T = this;
+    ImutAVLTree *Right = T->getRight();
+    while (Right) { T = Right; Right = T->getRight(); }
+    return T;
+  }
+
+  /// size - Returns the number of nodes in the tree, which includes
+  ///  both leaves and non-leaf nodes.
+  unsigned size() const {
+    unsigned n = 1;
+    if (const ImutAVLTree* L = getLeft())
+      n += L->size();
+    if (const ImutAVLTree* R = getRight())
+      n += R->size();
+    return n;
+  }
+
+  /// begin - Returns an iterator that iterates over the nodes of the tree
+  ///  in an inorder traversal.  The returned iterator thus refers to the
+  ///  tree node with the minimum data element.
+  iterator begin() const { return iterator(this); }
+
+  /// end - Returns an iterator for the tree that denotes the end of an
+  ///  inorder traversal.
+  iterator end() const { return iterator(); }
+
+  bool isElementEqual(value_type_ref V) const {
+    // Compare the keys.
+    if (!ImutInfo::isEqual(ImutInfo::KeyOfValue(getValue()),
+                           ImutInfo::KeyOfValue(V)))
+      return false;
+
+    // Also compare the data values.
+    if (!ImutInfo::isDataEqual(ImutInfo::DataOfValue(getValue()),
+                               ImutInfo::DataOfValue(V)))
+      return false;
+
+    return true;
+  }
+
+  bool isElementEqual(const ImutAVLTree* RHS) const {
+    return isElementEqual(RHS->getValue());
+  }
+
+  /// isEqual - Compares two trees for structural equality and returns true
+  ///   if they are equal.  The worst-case performance of this operation is
+  ///   linear in the sizes of the trees.
+  bool isEqual(const ImutAVLTree& RHS) const {
+    if (&RHS == this)
+      return true;
+
+    iterator LItr = begin(), LEnd = end();
+    iterator RItr = RHS.begin(), REnd = RHS.end();
+
+    while (LItr != LEnd && RItr != REnd) {
+      if (&*LItr == &*RItr) {
+        LItr.skipSubTree();
+        RItr.skipSubTree();
+        continue;
+      }
+
+      if (!LItr->isElementEqual(&*RItr))
+        return false;
+
+      ++LItr;
+      ++RItr;
+    }
+
+    return LItr == LEnd && RItr == REnd;
+  }
+
+  /// isNotEqual - Compares two trees for structural inequality.  Performance
+  ///  is the same as isEqual.
+  bool isNotEqual(const ImutAVLTree& RHS) const { return !isEqual(RHS); }
+
+  /// contains - Returns true if this tree contains a subtree (node) that
+  ///  has a data element that matches the specified key.  Complexity
+  ///  is logarithmic in the size of the tree.
+  bool contains(key_type_ref K) { return (bool) find(K); }
+
+  /// foreach - A member template that invokes operator() on a functor
+  ///  object (specified by Callback) for every node/subtree in the tree.
+  ///  Nodes are visited using an inorder traversal.
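+  ///
+  ///  A sketch of use (the functor type is illustrative):
+  ///  \code
+  ///    struct Counter {
+  ///      unsigned N = 0;
+  ///      void operator()(value_type_ref) { ++N; }
+  ///    };
+  ///    Counter C;
+  ///    T->foreach(C);   // T is an ImutAVLTree*; C.N == T->size()
+  ///  \endcode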
+  template <typename Callback>
+  void foreach(Callback& C) {
+    if (ImutAVLTree* L = getLeft())
+      L->foreach(C);
+
+    C(value);
+
+    if (ImutAVLTree* R = getRight())
+      R->foreach(C);
+  }
+
+  /// validateTree - A utility method that checks that the balancing and
+  ///  ordering invariants of the tree are satisfied.  It is a recursive
+  ///  method that returns the height of the tree, which is then consumed
+  ///  by the enclosing validateTree call.  External callers should ignore the
+  ///  return value.  An invalid tree will cause an assertion to fire in
+  ///  a debug build.
+  unsigned validateTree() const {
+    unsigned HL = getLeft() ? getLeft()->validateTree() : 0;
+    unsigned HR = getRight() ? getRight()->validateTree() : 0;
+    (void) HL;
+    (void) HR;
+
+    assert(getHeight() == ( HL > HR ? HL : HR ) + 1
+            && "Height calculation wrong");
+
+    assert((HL > HR ? HL-HR : HR-HL) <= 2
+           && "Balancing invariant violated");
+
+    assert((!getLeft() ||
+            ImutInfo::isLess(ImutInfo::KeyOfValue(getLeft()->getValue()),
+                             ImutInfo::KeyOfValue(getValue()))) &&
+           "Value in left child is not less that current value");
+
+    assert((!getRight() ||
+            ImutInfo::isLess(ImutInfo::KeyOfValue(getValue()),
+                             ImutInfo::KeyOfValue(getRight()->getValue()))) &&
+           "Current value is not less than value of right child");
+
+    return getHeight();
+  }
+
+  //===----------------------------------------------------===//
+  // Internal values.
+  //===----------------------------------------------------===//
+
+private:
+  Factory *factory;
+  ImutAVLTree *left;
+  ImutAVLTree *right;
+  ImutAVLTree *prev = nullptr;
+  ImutAVLTree *next = nullptr;
+
+  unsigned height : 28;
+  bool IsMutable : 1;
+  bool IsDigestCached : 1;
+  bool IsCanonicalized : 1;
+
+  value_type value;
+  uint32_t digest = 0;
+  uint32_t refCount = 0;
+
+  //===----------------------------------------------------===//
+  // Internal methods (node manipulation; used by Factory).
+  //===----------------------------------------------------===//
+
+private:
+  /// ImutAVLTree - Internal constructor that is only called by
+  ///   ImutAVLFactory.
+  ImutAVLTree(Factory *f, ImutAVLTree* l, ImutAVLTree* r, value_type_ref v,
+              unsigned height)
+    : factory(f), left(l), right(r), height(height), IsMutable(true),
+      IsDigestCached(false), IsCanonicalized(false), value(v)
+  {
+    if (left) left->retain();
+    if (right) right->retain();
+  }
+
+  /// isMutable - Returns true if the left and right subtree references
+  ///  (as well as height) can be changed.  If this method returns false,
+  ///  the tree is truly immutable.  Trees returned from an ImutAVLFactory
+  ///  object should always have this method return false.  Further, if this
+  ///  method returns false for an instance of ImutAVLTree, all subtrees
+  ///  will also have this method return false.  The converse is not true.
+  bool isMutable() const { return IsMutable; }
+
+  /// hasCachedDigest - Returns true if the digest for this tree is cached.
+  ///  This can only be true if the tree is immutable.
+  bool hasCachedDigest() const { return IsDigestCached; }
+
+  //===----------------------------------------------------===//
+  // Mutating operations.  A tree root can be manipulated as
+  // long as its reference has not "escaped" from internal
+  // methods of a factory object (see below).  When a tree
+  // pointer is externally viewable by client code, the
+  // internal "mutable bit" is cleared to mark the tree
+  // immutable.  Note that a tree that still has its mutable
+  // bit set may have children (subtrees) that are themselves
+  // immutable.
+  //===----------------------------------------------------===//
+
+  /// markImmutable - Clears the mutable flag for a tree.  After this happens,
+  ///   it is an error to call setLeft(), setRight(), and setHeight().
+  void markImmutable() {
+    assert(isMutable() && "Mutable flag already removed.");
+    IsMutable = false;
+  }
+
+  /// markedCachedDigest - Marks the digest for this tree as cached.
+  void markedCachedDigest() {
+    assert(!hasCachedDigest() && "Digest is already cached.");
+    IsDigestCached = true;
+  }
+
+  /// setHeight - Changes the height of the tree.  Used internally by
+  ///  ImutAVLFactory.
+  void setHeight(unsigned h) {
+    assert(isMutable() && "Only a mutable tree can have its height changed.");
+    height = h;
+  }
+
+  static uint32_t computeDigest(ImutAVLTree *L, ImutAVLTree *R,
+                                value_type_ref V) {
+    uint32_t digest = 0;
+
+    if (L)
+      digest += L->computeDigest();
+
+    // Compute digest of stored data.
+    FoldingSetNodeID ID;
+    ImutInfo::Profile(ID,V);
+    digest += ID.ComputeHash();
+
+    if (R)
+      digest += R->computeDigest();
+
+    return digest;
+  }
+
+  uint32_t computeDigest() {
+    // Check whether the digest has already been computed and cached.
+    if (hasCachedDigest())
+      return digest;
+
+    uint32_t X = computeDigest(getLeft(), getRight(), getValue());
+    digest = X;
+    markedCachedDigest();
+    return X;
+  }
+
+  //===----------------------------------------------------===//
+  // Reference count operations.
+  //===----------------------------------------------------===//
+
+public:
+  void retain() { ++refCount; }
+
+  void release() {
+    assert(refCount > 0);
+    if (--refCount == 0)
+      destroy();
+  }
+
+  void destroy() {
+    if (left)
+      left->release();
+    if (right)
+      right->release();
+    if (IsCanonicalized) {
+      if (next)
+        next->prev = prev;
+
+      if (prev)
+        prev->next = next;
+      else
+        factory->Cache[factory->maskCacheIndex(computeDigest())] = next;
+    }
+
+    // We need to clear the mutability bit in case we are
+    // destroying the node as part of a sweep in ImutAVLFactory::recoverNodes().
+    IsMutable = false;
+    factory->freeNodes.push_back(this);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Factory class.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo >
+class ImutAVLFactory {
+  friend class ImutAVLTree<ImutInfo>;
+
+  using TreeTy = ImutAVLTree<ImutInfo>;
+  using value_type_ref = typename TreeTy::value_type_ref;
+  using key_type_ref = typename TreeTy::key_type_ref;
+  using CacheTy = DenseMap<unsigned, TreeTy*>;
+
+  CacheTy Cache;
+  uintptr_t Allocator;
+  std::vector<TreeTy*> createdNodes;
+  std::vector<TreeTy*> freeNodes;
+
+  bool ownsAllocator() const {
+    return (Allocator & 0x1) == 0;
+  }
+
+  BumpPtrAllocator& getAllocator() const {
+    return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
+  }
+
+  //===--------------------------------------------------===//
+  // Public interface.
+  //===--------------------------------------------------===//
+
+public:
+  ImutAVLFactory()
+    : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}
+
+  ImutAVLFactory(BumpPtrAllocator& Alloc)
+    : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}
+
+  ~ImutAVLFactory() {
+    if (ownsAllocator()) delete &getAllocator();
+  }
+
+  TreeTy* add(TreeTy* T, value_type_ref V) {
+    T = add_internal(V,T);
+    markImmutable(T);
+    recoverNodes();
+    return T;
+  }
+
+  TreeTy* remove(TreeTy* T, key_type_ref V) {
+    T = remove_internal(V,T);
+    markImmutable(T);
+    recoverNodes();
+    return T;
+  }
+
+  TreeTy* getEmptyTree() const { return nullptr; }
+
+protected:
+  //===--------------------------------------------------===//
+  // A bunch of quick helper functions used for reasoning
+  // about the properties of trees and their children.
+  // These have succinct names so that the balancing code
+  // is as terse (and readable) as possible.
+  //===--------------------------------------------------===//
+
+  bool            isEmpty(TreeTy* T) const { return !T; }
+  unsigned        getHeight(TreeTy* T) const { return T ? T->getHeight() : 0; }
+  TreeTy*         getLeft(TreeTy* T) const { return T->getLeft(); }
+  TreeTy*         getRight(TreeTy* T) const { return T->getRight(); }
+  value_type_ref  getValue(TreeTy* T) const { return T->value; }
+
+  // Make sure the index is not the Tombstone or Empty key of the DenseMap.
+  static unsigned maskCacheIndex(unsigned I) { return (I & ~0x02); }
+
+  unsigned incrementHeight(TreeTy* L, TreeTy* R) const {
+    unsigned hl = getHeight(L);
+    unsigned hr = getHeight(R);
+    return (hl > hr ? hl : hr) + 1;
+  }
+
+  static bool compareTreeWithSection(TreeTy* T,
+                                     typename TreeTy::iterator& TI,
+                                     typename TreeTy::iterator& TE) {
+    typename TreeTy::iterator I = T->begin(), E = T->end();
+    for ( ; I!=E ; ++I, ++TI) {
+      if (TI == TE || !I->isElementEqual(&*TI))
+        return false;
+    }
+    return true;
+  }
+
+  //===--------------------------------------------------===//
+  // "createNode" is used to generate new tree roots that link
+  // to other trees.  The function may also simply move links
+  // in an existing root if that root is still marked mutable.
+  // This is necessary because otherwise our balancing code
+  // would leak memory as it would create nodes that are
+  // then discarded later before the finished tree is
+  // returned to the caller.
+  //===--------------------------------------------------===//
+
+  TreeTy* createNode(TreeTy* L, value_type_ref V, TreeTy* R) {
+    BumpPtrAllocator& A = getAllocator();
+    TreeTy* T;
+    if (!freeNodes.empty()) {
+      T = freeNodes.back();
+      freeNodes.pop_back();
+      assert(T != L);
+      assert(T != R);
+    } else {
+      T = (TreeTy*) A.Allocate<TreeTy>();
+    }
+    new (T) TreeTy(this, L, R, V, incrementHeight(L,R));
+    createdNodes.push_back(T);
+    return T;
+  }
+
+  TreeTy* createNode(TreeTy* newLeft, TreeTy* oldTree, TreeTy* newRight) {
+    return createNode(newLeft, getValue(oldTree), newRight);
+  }
+
+  void recoverNodes() {
+    for (unsigned i = 0, n = createdNodes.size(); i < n; ++i) {
+      TreeTy *N = createdNodes[i];
+      if (N->isMutable() && N->refCount == 0)
+        N->destroy();
+    }
+    createdNodes.clear();
+  }
+
+  /// balanceTree - Used by add_internal and remove_internal to
+  ///  balance a newly created tree.
+  TreeTy* balanceTree(TreeTy* L, value_type_ref V, TreeTy* R) {
+    unsigned hl = getHeight(L);
+    unsigned hr = getHeight(R);
+
+    if (hl > hr + 2) {
+      assert(!isEmpty(L) && "Left tree cannot be empty to have a height >= 2");
+
+      TreeTy *LL = getLeft(L);
+      TreeTy *LR = getRight(L);
+
+      if (getHeight(LL) >= getHeight(LR))
+        return createNode(LL, L, createNode(LR,V,R));
+
+      assert(!isEmpty(LR) && "LR cannot be empty because it has a height >= 1");
+
+      TreeTy *LRL = getLeft(LR);
+      TreeTy *LRR = getRight(LR);
+
+      return createNode(createNode(LL,L,LRL), LR, createNode(LRR,V,R));
+    }
+
+    if (hr > hl + 2) {
+      assert(!isEmpty(R) && "Right tree cannot be empty to have a height >= 2");
+
+      TreeTy *RL = getLeft(R);
+      TreeTy *RR = getRight(R);
+
+      if (getHeight(RR) >= getHeight(RL))
+        return createNode(createNode(L,V,RL), R, RR);
+
+      assert(!isEmpty(RL) && "RL cannot be empty because it has a height >= 1");
+
+      TreeTy *RLL = getLeft(RL);
+      TreeTy *RLR = getRight(RL);
+
+      return createNode(createNode(L,V,RLL), RL, createNode(RLR,R,RR));
+    }
+
+    return createNode(L,V,R);
+  }
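+
+  // For example, when the left subtree is too tall (hl > hr + 2) and LL is at
+  // least as tall as LR, a single rotation rooted at L's value restores
+  // balance:
+  //
+  //         (V)              (L)
+  //        /   \            /   \
+  //      (L)    R   ==>   LL    (V)
+  //     /   \                  /   \
+  //   LL     LR              LR     R
+  //
+  // When LR is the taller child, the double rotation above hoists LR into the
+  // root position instead, splitting its children LRL and LRR between the two
+  // new subtrees.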
+
+  /// add_internal - Creates a new tree that includes the specified
+  ///  data and the data from the original tree.  If the original tree
+  ///  already contained the data item, the original tree is returned.
+  TreeTy* add_internal(value_type_ref V, TreeTy* T) {
+    if (isEmpty(T))
+      return createNode(T, V, T);
+    assert(!T->isMutable());
+
+    key_type_ref K = ImutInfo::KeyOfValue(V);
+    key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
+
+    if (ImutInfo::isEqual(K,KCurrent))
+      return createNode(getLeft(T), V, getRight(T));
+    else if (ImutInfo::isLess(K,KCurrent))
+      return balanceTree(add_internal(V, getLeft(T)), getValue(T), getRight(T));
+    else
+      return balanceTree(getLeft(T), getValue(T), add_internal(V, getRight(T)));
+  }
+
+  /// remove_internal - Creates a new tree that includes all the data
+  ///  from the original tree except the specified data.  If the
+  ///  specified data did not exist in the original tree, the original
+  ///  tree is returned.
+  TreeTy* remove_internal(key_type_ref K, TreeTy* T) {
+    if (isEmpty(T))
+      return T;
+
+    assert(!T->isMutable());
+
+    key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
+
+    if (ImutInfo::isEqual(K,KCurrent)) {
+      return combineTrees(getLeft(T), getRight(T));
+    } else if (ImutInfo::isLess(K,KCurrent)) {
+      return balanceTree(remove_internal(K, getLeft(T)),
+                         getValue(T), getRight(T));
+    } else {
+      return balanceTree(getLeft(T), getValue(T),
+                         remove_internal(K, getRight(T)));
+    }
+  }
+
+  TreeTy* combineTrees(TreeTy* L, TreeTy* R) {
+    if (isEmpty(L))
+      return R;
+    if (isEmpty(R))
+      return L;
+    TreeTy* OldNode;
+    TreeTy* newRight = removeMinBinding(R,OldNode);
+    return balanceTree(L, getValue(OldNode), newRight);
+  }
+
+  TreeTy* removeMinBinding(TreeTy* T, TreeTy*& Noderemoved) {
+    assert(!isEmpty(T));
+    if (isEmpty(getLeft(T))) {
+      Noderemoved = T;
+      return getRight(T);
+    }
+    return balanceTree(removeMinBinding(getLeft(T), Noderemoved),
+                       getValue(T), getRight(T));
+  }
+
+  /// markImmutable - Clears the mutable bits of a root and all of its
+  ///  descendants.
+  void markImmutable(TreeTy* T) {
+    if (!T || !T->isMutable())
+      return;
+    T->markImmutable();
+    markImmutable(getLeft(T));
+    markImmutable(getRight(T));
+  }
+
+public:
+  TreeTy *getCanonicalTree(TreeTy *TNew) {
+    if (!TNew)
+      return nullptr;
+
+    if (TNew->IsCanonicalized)
+      return TNew;
+
+    // Search the hashtable for another tree with the same digest, and
+    // if we find a collision, compare those trees by their contents.
+    unsigned digest = TNew->computeDigest();
+    TreeTy *&entry = Cache[maskCacheIndex(digest)];
+    do {
+      if (!entry)
+        break;
+      for (TreeTy *T = entry ; T != nullptr; T = T->next) {
+        // Compare the Contents('T') with Contents('TNew')
+        typename TreeTy::iterator TI = T->begin(), TE = T->end();
+        if (!compareTreeWithSection(TNew, TI, TE))
+          continue;
+        if (TI != TE)
+          continue; // T has more contents than TNew.
+        // Trees did match!  Return 'T'.
+        if (TNew->refCount == 0)
+          TNew->destroy();
+        return T;
+      }
+      entry->prev = TNew;
+      TNew->next = entry;
+    } while (false);
+
+    entry = TNew;
+    TNew->IsCanonicalized = true;
+    return TNew;
+  }
+};
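+
+// Canonicalization gives structurally equal trees built by the same factory a
+// single shared representation, which is what allows ImmutableSet::Profile to
+// identify a set by its root pointer alone.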
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Iterators.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo>
+class ImutAVLTreeGenericIterator
+    : public std::iterator<std::bidirectional_iterator_tag,
+                           ImutAVLTree<ImutInfo>> {
+  SmallVector<uintptr_t,20> stack;
+
+public:
+  enum VisitFlag { VisitedNone=0x0, VisitedLeft=0x1, VisitedRight=0x3,
+                   Flags=0x3 };
+
+  using TreeTy = ImutAVLTree<ImutInfo>;
+
+  ImutAVLTreeGenericIterator() = default;
+  ImutAVLTreeGenericIterator(const TreeTy *Root) {
+    if (Root) stack.push_back(reinterpret_cast<uintptr_t>(Root));
+  }
+
+  TreeTy &operator*() const {
+    assert(!stack.empty());
+    return *reinterpret_cast<TreeTy *>(stack.back() & ~Flags);
+  }
+  TreeTy *operator->() const { return &**this; }
+
+  uintptr_t getVisitState() const {
+    assert(!stack.empty());
+    return stack.back() & Flags;
+  }
+
+  bool atEnd() const { return stack.empty(); }
+
+  bool atBeginning() const {
+    return stack.size() == 1 && getVisitState() == VisitedNone;
+  }
+
+  void skipToParent() {
+    assert(!stack.empty());
+    stack.pop_back();
+    if (stack.empty())
+      return;
+    switch (getVisitState()) {
+      case VisitedNone:
+        stack.back() |= VisitedLeft;
+        break;
+      case VisitedLeft:
+        stack.back() |= VisitedRight;
+        break;
+      default:
+        llvm_unreachable("Unreachable.");
+    }
+  }
+
+  bool operator==(const ImutAVLTreeGenericIterator &x) const {
+    return stack == x.stack;
+  }
+
+  bool operator!=(const ImutAVLTreeGenericIterator &x) const {
+    return !(*this == x);
+  }
+
+  ImutAVLTreeGenericIterator &operator++() {
+    assert(!stack.empty());
+    TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
+    assert(Current);
+    switch (getVisitState()) {
+      case VisitedNone:
+        if (TreeTy* L = Current->getLeft())
+          stack.push_back(reinterpret_cast<uintptr_t>(L));
+        else
+          stack.back() |= VisitedLeft;
+        break;
+      case VisitedLeft:
+        if (TreeTy* R = Current->getRight())
+          stack.push_back(reinterpret_cast<uintptr_t>(R));
+        else
+          stack.back() |= VisitedRight;
+        break;
+      case VisitedRight:
+        skipToParent();
+        break;
+      default:
+        llvm_unreachable("Unreachable.");
+    }
+    return *this;
+  }
+
+  ImutAVLTreeGenericIterator &operator--() {
+    assert(!stack.empty());
+    TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
+    assert(Current);
+    switch (getVisitState()) {
+      case VisitedNone:
+        stack.pop_back();
+        break;
+      case VisitedLeft:
+        stack.back() &= ~Flags; // Set state to "VisitedNone."
+        if (TreeTy* L = Current->getLeft())
+          stack.push_back(reinterpret_cast<uintptr_t>(L) | VisitedRight);
+        break;
+      case VisitedRight:
+        stack.back() &= ~Flags;
+        stack.back() |= VisitedLeft;
+        if (TreeTy* R = Current->getRight())
+          stack.push_back(reinterpret_cast<uintptr_t>(R) | VisitedRight);
+        break;
+      default:
+        llvm_unreachable("Unreachable.");
+    }
+    return *this;
+  }
+};
+
+template <typename ImutInfo>
+class ImutAVLTreeInOrderIterator
+    : public std::iterator<std::bidirectional_iterator_tag,
+                           ImutAVLTree<ImutInfo>> {
+  using InternalIteratorTy = ImutAVLTreeGenericIterator<ImutInfo>;
+
+  InternalIteratorTy InternalItr;
+
+public:
+  using TreeTy = ImutAVLTree<ImutInfo>;
+
+  ImutAVLTreeInOrderIterator(const TreeTy* Root) : InternalItr(Root) {
+    if (Root)
+      ++*this; // Advance to first element.
+  }
+
+  ImutAVLTreeInOrderIterator() : InternalItr() {}
+
+  bool operator==(const ImutAVLTreeInOrderIterator &x) const {
+    return InternalItr == x.InternalItr;
+  }
+
+  bool operator!=(const ImutAVLTreeInOrderIterator &x) const {
+    return !(*this == x);
+  }
+
+  TreeTy &operator*() const { return *InternalItr; }
+  TreeTy *operator->() const { return &*InternalItr; }
+
+  ImutAVLTreeInOrderIterator &operator++() {
+    do ++InternalItr;
+    while (!InternalItr.atEnd() &&
+           InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
+
+    return *this;
+  }
+
+  ImutAVLTreeInOrderIterator &operator--() {
+    do --InternalItr;
+    while (!InternalItr.atBeginning() &&
+           InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
+
+    return *this;
+  }
+
+  void skipSubTree() {
+    InternalItr.skipToParent();
+
+    while (!InternalItr.atEnd() &&
+           InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft)
+      ++InternalItr;
+  }
+};
+
+/// Generic iterator that wraps a T::TreeTy::iterator and exposes
+/// iterator::getValue() on dereference.
+template <typename T>
+struct ImutAVLValueIterator
+    : iterator_adaptor_base<
+          ImutAVLValueIterator<T>, typename T::TreeTy::iterator,
+          typename std::iterator_traits<
+              typename T::TreeTy::iterator>::iterator_category,
+          const typename T::value_type> {
+  ImutAVLValueIterator() = default;
+  explicit ImutAVLValueIterator(typename T::TreeTy *Tree)
+      : ImutAVLValueIterator::iterator_adaptor_base(Tree) {}
+
+  typename ImutAVLValueIterator::reference operator*() const {
+    return this->I->getValue();
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Trait classes for Profile information.
+//===----------------------------------------------------------------------===//
+
+/// Generic profile template.  The default behavior is to invoke the
+/// profile method of an object.  Specializations for primitive integers
+/// and generic handling of pointers is done below.
+template <typename T>
+struct ImutProfileInfo {
+  using value_type = const T;
+  using value_type_ref = const T&;
+
+  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+    FoldingSetTrait<T>::Profile(X,ID);
+  }
+};
+
+/// Profile traits for integers.
+template <typename T>
+struct ImutProfileInteger {
+  using value_type = const T;
+  using value_type_ref = const T&;
+
+  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+    ID.AddInteger(X);
+  }
+};
+
+#define PROFILE_INTEGER_INFO(X)\
+template<> struct ImutProfileInfo<X> : ImutProfileInteger<X> {};
+
+PROFILE_INTEGER_INFO(char)
+PROFILE_INTEGER_INFO(unsigned char)
+PROFILE_INTEGER_INFO(short)
+PROFILE_INTEGER_INFO(unsigned short)
+PROFILE_INTEGER_INFO(unsigned)
+PROFILE_INTEGER_INFO(signed)
+PROFILE_INTEGER_INFO(long)
+PROFILE_INTEGER_INFO(unsigned long)
+PROFILE_INTEGER_INFO(long long)
+PROFILE_INTEGER_INFO(unsigned long long)
+
+#undef PROFILE_INTEGER_INFO
+
+/// Profile traits for booleans.
+template <>
+struct ImutProfileInfo<bool> {
+  using value_type = const bool;
+  using value_type_ref = const bool&;
+
+  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+    ID.AddBoolean(X);
+  }
+};
+
+/// Generic profile trait for pointer types.  We treat pointers as
+/// references to unique objects.
+template <typename T>
+struct ImutProfileInfo<T*> {
+  using value_type = const T*;
+  using value_type_ref = value_type;
+
+  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+    ID.AddPointer(X);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Trait classes that contain element comparison operators and type
+//  definitions used by ImutAVLTree, ImmutableSet, and ImmutableMap.  These
+//  inherit from the profile traits (ImutProfileInfo) to include operations
+//  for element profiling.
+//===----------------------------------------------------------------------===//
+
+/// ImutContainerInfo - Generic definition of comparison operations for
+///   elements of immutable containers that defaults to using
+///   std::equal_to<> and std::less<> to perform comparison of elements.
+template <typename T>
+struct ImutContainerInfo : public ImutProfileInfo<T> {
+  using value_type = typename ImutProfileInfo<T>::value_type;
+  using value_type_ref = typename ImutProfileInfo<T>::value_type_ref;
+  using key_type = value_type;
+  using key_type_ref = value_type_ref;
+  using data_type = bool;
+  using data_type_ref = bool;
+
+  static key_type_ref KeyOfValue(value_type_ref D) { return D; }
+  static data_type_ref DataOfValue(value_type_ref) { return true; }
+
+  static bool isEqual(key_type_ref LHS, key_type_ref RHS) {
+    return std::equal_to<key_type>()(LHS,RHS);
+  }
+
+  static bool isLess(key_type_ref LHS, key_type_ref RHS) {
+    return std::less<key_type>()(LHS,RHS);
+  }
+
+  static bool isDataEqual(data_type_ref, data_type_ref) { return true; }
+};
+
+/// ImutContainerInfo - Specialization for pointer values to treat pointers
+///  as references to unique objects.  Pointers are thus compared by
+///  their addresses.
+template <typename T>
+struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> {
+  using value_type = typename ImutProfileInfo<T*>::value_type;
+  using value_type_ref = typename ImutProfileInfo<T*>::value_type_ref;
+  using key_type = value_type;
+  using key_type_ref = value_type_ref;
+  using data_type = bool;
+  using data_type_ref = bool;
+
+  static key_type_ref KeyOfValue(value_type_ref D) { return D; }
+  static data_type_ref DataOfValue(value_type_ref) { return true; }
+
+  static bool isEqual(key_type_ref LHS, key_type_ref RHS) { return LHS == RHS; }
+
+  static bool isLess(key_type_ref LHS, key_type_ref RHS) { return LHS < RHS; }
+
+  static bool isDataEqual(data_type_ref, data_type_ref) { return true; }
+};
+
+//===----------------------------------------------------------------------===//
+// Immutable Set
+//===----------------------------------------------------------------------===//
+
+template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
+class ImmutableSet {
+public:
+  using value_type = typename ValInfo::value_type;
+  using value_type_ref = typename ValInfo::value_type_ref;
+  using TreeTy = ImutAVLTree<ValInfo>;
+
+private:
+  TreeTy *Root;
+
+public:
+  /// Constructs a set from a pointer to a tree root.  In general one
+  /// should use a Factory object to create sets instead of directly
+  /// invoking the constructor, but there are cases where making this
+  /// constructor public is useful.
+  explicit ImmutableSet(TreeTy* R) : Root(R) {
+    if (Root) { Root->retain(); }
+  }
+
+  ImmutableSet(const ImmutableSet &X) : Root(X.Root) {
+    if (Root) { Root->retain(); }
+  }
+
+  ~ImmutableSet() {
+    if (Root) { Root->release(); }
+  }
+
+  ImmutableSet &operator=(const ImmutableSet &X) {
+    if (Root != X.Root) {
+      if (X.Root) { X.Root->retain(); }
+      if (Root) { Root->release(); }
+      Root = X.Root;
+    }
+    return *this;
+  }
+
+  class Factory {
+    typename TreeTy::Factory F;
+    const bool Canonicalize;
+
+  public:
+    Factory(bool canonicalize = true)
+      : Canonicalize(canonicalize) {}
+
+    Factory(BumpPtrAllocator& Alloc, bool canonicalize = true)
+      : F(Alloc), Canonicalize(canonicalize) {}
+
+    Factory(const Factory& RHS) = delete;
+    void operator=(const Factory& RHS) = delete;
+
+    /// getEmptySet - Returns an immutable set that contains no elements.
+    ImmutableSet getEmptySet() {
+      return ImmutableSet(F.getEmptyTree());
+    }
+
+    /// add - Creates a new immutable set that contains all of the values
+    ///  of the original set with the addition of the specified value.  If
+    ///  the original set already included the value, then the original set is
+    ///  returned and no memory is allocated.  The time and space complexity
+    ///  of this operation is logarithmic in the size of the original set.
+    ///  The memory allocated to represent the set is released when the
+    ///  factory object that created the set is destroyed.
+    ImmutableSet add(ImmutableSet Old, value_type_ref V) {
+      TreeTy *NewT = F.add(Old.Root, V);
+      return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
+    }
+
+    /// remove - Creates a new immutable set that contains all of the values
+    ///  of the original set with the exception of the specified value.  If
+    ///  the original set did not contain the value, the original set is
+    ///  returned and no memory is allocated.  The time and space complexity
+    ///  of this operation is logarithmic in the size of the original set.
+    ///  The memory allocated to represent the set is released when the
+    ///  factory object that created the set is destroyed.
+    ImmutableSet remove(ImmutableSet Old, value_type_ref V) {
+      TreeTy *NewT = F.remove(Old.Root, V);
+      return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
+    }
+
+    BumpPtrAllocator& getAllocator() { return F.getAllocator(); }
+
+    typename TreeTy::Factory *getTreeFactory() const {
+      return const_cast<typename TreeTy::Factory *>(&F);
+    }
+  };
+
+  friend class Factory;
+
+  /// Returns true if the set contains the specified value.
+  bool contains(value_type_ref V) const {
+    return Root ? Root->contains(V) : false;
+  }
+
+  bool operator==(const ImmutableSet &RHS) const {
+    return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+  }
+
+  bool operator!=(const ImmutableSet &RHS) const {
+    return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+  }
+
+  TreeTy *getRoot() {
+    if (Root) { Root->retain(); }
+    return Root;
+  }
+
+  TreeTy *getRootWithoutRetain() const {
+    return Root;
+  }
+
+  /// isEmpty - Return true if the set contains no elements.
+  bool isEmpty() const { return !Root; }
+
+  /// isSingleton - Return true if the set contains exactly one element.
+  ///   This method runs in constant time.
+  bool isSingleton() const { return getHeight() == 1; }
+
+  template <typename Callback>
+  void foreach(Callback& C) { if (Root) Root->foreach(C); }
+
+  template <typename Callback>
+  void foreach() { if (Root) { Callback C; Root->foreach(C); } }
+
+  //===--------------------------------------------------===//
+  // Iterators.
+  //===--------------------------------------------------===//
+
+  using iterator = ImutAVLValueIterator<ImmutableSet>;
+
+  iterator begin() const { return iterator(Root); }
+  iterator end() const { return iterator(); }
+
+  //===--------------------------------------------------===//
+  // Utility methods.
+  //===--------------------------------------------------===//
+
+  unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+  static void Profile(FoldingSetNodeID &ID, const ImmutableSet &S) {
+    ID.AddPointer(S.Root);
+  }
+
+  void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+
+  //===--------------------------------------------------===//
+  // For testing.
+  //===--------------------------------------------------===//
+
+  void validateTree() const { if (Root) Root->validateTree(); }
+};
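+
+// A minimal usage sketch (illustrative, with 'unsigned' elements): all
+// mutation goes through a Factory, and every operation returns a new set while
+// leaving the original intact.
+//
+//   ImmutableSet<unsigned>::Factory F;
+//   ImmutableSet<unsigned> Empty = F.getEmptySet();
+//   ImmutableSet<unsigned> S1 = F.add(Empty, 1);  // {1}
+//   ImmutableSet<unsigned> S2 = F.add(S1, 2);     // {1, 2}; S1 is still {1}
+//   assert(S2.contains(1) && !S1.contains(2));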
+
+// NOTE: This may some day replace the current ImmutableSet.
+template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
+class ImmutableSetRef {
+public:
+  using value_type = typename ValInfo::value_type;
+  using value_type_ref = typename ValInfo::value_type_ref;
+  using TreeTy = ImutAVLTree<ValInfo>;
+  using FactoryTy = typename TreeTy::Factory;
+
+private:
+  TreeTy *Root;
+  FactoryTy *Factory;
+
+public:
+  /// Constructs a set from a pointer to a tree root.  In general one
+  /// should use a Factory object to create sets instead of directly
+  /// invoking the constructor, but there are cases where making this
+  /// constructor public is useful.
+  explicit ImmutableSetRef(TreeTy* R, FactoryTy *F)
+    : Root(R),
+      Factory(F) {
+    if (Root) { Root->retain(); }
+  }
+
+  ImmutableSetRef(const ImmutableSetRef &X)
+    : Root(X.Root),
+      Factory(X.Factory) {
+    if (Root) { Root->retain(); }
+  }
+
+  ~ImmutableSetRef() {
+    if (Root) { Root->release(); }
+  }
+
+  ImmutableSetRef &operator=(const ImmutableSetRef &X) {
+    if (Root != X.Root) {
+      if (X.Root) { X.Root->retain(); }
+      if (Root) { Root->release(); }
+      Root = X.Root;
+      Factory = X.Factory;
+    }
+    return *this;
+  }
+
+  static ImmutableSetRef getEmptySet(FactoryTy *F) {
+    return ImmutableSetRef(nullptr, F);
+  }
+
+  ImmutableSetRef add(value_type_ref V) {
+    return ImmutableSetRef(Factory->add(Root, V), Factory);
+  }
+
+  ImmutableSetRef remove(value_type_ref V) {
+    return ImmutableSetRef(Factory->remove(Root, V), Factory);
+  }
+
+  /// Returns true if the set contains the specified value.
+  bool contains(value_type_ref V) const {
+    return Root ? Root->contains(V) : false;
+  }
+
+  ImmutableSet<ValT> asImmutableSet(bool canonicalize = true) const {
+    return ImmutableSet<ValT>(canonicalize ?
+                              Factory->getCanonicalTree(Root) : Root);
+  }
+
+  TreeTy *getRootWithoutRetain() const {
+    return Root;
+  }
+
+  bool operator==(const ImmutableSetRef &RHS) const {
+    return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+  }
+
+  bool operator!=(const ImmutableSetRef &RHS) const {
+    return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+  }
+
+  /// isEmpty - Return true if the set contains no elements.
+  bool isEmpty() const { return !Root; }
+
+  /// isSingleton - Return true if the set contains exactly one element.
+  ///   This method runs in constant time.
+  bool isSingleton() const { return getHeight() == 1; }
+
+  //===--------------------------------------------------===//
+  // Iterators.
+  //===--------------------------------------------------===//
+
+  using iterator = ImutAVLValueIterator<ImmutableSetRef>;
+
+  iterator begin() const { return iterator(Root); }
+  iterator end() const { return iterator(); }
+
+  //===--------------------------------------------------===//
+  // Utility methods.
+  //===--------------------------------------------------===//
+
+  unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+  static void Profile(FoldingSetNodeID &ID, const ImmutableSetRef &S) {
+    ID.AddPointer(S.Root);
+  }
+
+  void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+
+  //===--------------------------------------------------===//
+  // For testing.
+  //===--------------------------------------------------===//
+
+  void validateTree() const { if (Root) Root->validateTree(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IMMUTABLESET_H
diff --git a/linux-x64/clang/include/llvm/ADT/IndexedMap.h b/linux-x64/clang/include/llvm/ADT/IndexedMap.h
new file mode 100644
index 0000000..2ee80d2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/IndexedMap.h
@@ -0,0 +1,85 @@
+//===- llvm/ADT/IndexedMap.h - An index map implementation ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an indexed map. The index map template takes two
+// types. The first is the mapped type and the second is a functor
+// that maps its argument to a size_t. On instantiation a "null" value
+// can be provided to be used as a "does not exist" indicator in the
+// map. A member function grow() is provided that, given the value of the
+// maximally indexed key (the argument of the functor), makes sure the map has
+// enough space for it.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INDEXEDMAP_H
+#define LLVM_ADT_INDEXEDMAP_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include <cassert>
+
+namespace llvm {
+
+template <typename T, typename ToIndexT = identity<unsigned>>
+  class IndexedMap {
+    using IndexT = typename ToIndexT::argument_type;
+    // Prefer SmallVector with zero inline storage over std::vector. IndexedMaps
+    // can grow very large and SmallVector grows more efficiently as long as T
+    // is trivially copyable.
+    using StorageT = SmallVector<T, 0>;
+
+    StorageT storage_;
+    T nullVal_;
+    ToIndexT toIndex_;
+
+  public:
+    IndexedMap() : nullVal_(T()) {}
+
+    explicit IndexedMap(const T& val) : nullVal_(val) {}
+
+    typename StorageT::reference operator[](IndexT n) {
+      assert(toIndex_(n) < storage_.size() && "index out of bounds!");
+      return storage_[toIndex_(n)];
+    }
+
+    typename StorageT::const_reference operator[](IndexT n) const {
+      assert(toIndex_(n) < storage_.size() && "index out of bounds!");
+      return storage_[toIndex_(n)];
+    }
+
+    void reserve(typename StorageT::size_type s) {
+      storage_.reserve(s);
+    }
+
+    void resize(typename StorageT::size_type s) {
+      storage_.resize(s, nullVal_);
+    }
+
+    void clear() {
+      storage_.clear();
+    }
+
+    void grow(IndexT n) {
+      unsigned NewSize = toIndex_(n) + 1;
+      if (NewSize > storage_.size())
+        resize(NewSize);
+    }
+
+    bool inBounds(IndexT n) const {
+      return toIndex_(n) < storage_.size();
+    }
+
+    typename StorageT::size_type size() const {
+      return storage_.size();
+    }
+  };
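+
+  // A usage sketch (illustrative): grow() takes the largest key that must be
+  // representable and resizes on demand, filling new slots with the null
+  // value.
+  //
+  //   IndexedMap<const char *> Names("<none>");
+  //   Names.grow(10);                 // indices 0..10 now map to "<none>"
+  //   Names[3] = "three";
+  //   assert(Names.inBounds(10) && !Names.inBounds(11));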
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_INDEXEDMAP_H
diff --git a/linux-x64/clang/include/llvm/ADT/IntEqClasses.h b/linux-x64/clang/include/llvm/ADT/IntEqClasses.h
new file mode 100644
index 0000000..0baee2f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/IntEqClasses.h
@@ -0,0 +1,88 @@
+//===-- llvm/ADT/IntEqClasses.h - Equiv. Classes of Integers ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Equivalence classes for small integers. This is a mapping of the integers
+// 0 .. N-1 into M equivalence classes numbered 0 .. M-1.
+//
+// Initially each integer has its own equivalence class. Classes are joined by
+// passing a representative member of each class to join().
+//
+// Once the classes are built, compress() will number them 0 .. M-1 and prevent
+// further changes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTEQCLASSES_H
+#define LLVM_ADT_INTEQCLASSES_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class IntEqClasses {
+  /// EC - When uncompressed, map each integer to a smaller member of its
+  /// equivalence class. The class leader is the smallest member and maps to
+  /// itself.
+  ///
+  /// When compressed, EC[i] is the equivalence class of i.
+  SmallVector<unsigned, 8> EC;
+
+  /// NumClasses - The number of equivalence classes when compressed, or 0 when
+  /// uncompressed.
+  unsigned NumClasses;
+
+public:
+  /// IntEqClasses - Create an equivalence class mapping for 0 .. N-1.
+  IntEqClasses(unsigned N = 0) : NumClasses(0) { grow(N); }
+
+  /// grow - Increase capacity to hold 0 .. N-1, putting new integers in unique
+  /// equivalence classes.
+  /// This requires an uncompressed map.
+  void grow(unsigned N);
+
+  /// clear - Clear all classes so that grow() will assign a unique class to
+  /// every integer.
+  void clear() {
+    EC.clear();
+    NumClasses = 0;
+  }
+
+  /// Join the equivalence classes of a and b. After joining classes,
+  /// findLeader(a) == findLeader(b). This requires an uncompressed map.
+  /// Returns the new leader.
+  unsigned join(unsigned a, unsigned b);
+
+  /// findLeader - Compute the leader of a's equivalence class. This is the
+  /// smallest member of the class.
+  /// This requires an uncompressed map.
+  unsigned findLeader(unsigned a) const;
+
+  /// compress - Compress equivalence classes by numbering them 0 .. M-1.
+  /// This makes the equivalence class map immutable.
+  void compress();
+
+  /// getNumClasses - Return the number of equivalence classes after compress()
+  /// was called.
+  unsigned getNumClasses() const { return NumClasses; }
+
+  /// operator[] - Return a's equivalence class number, 0 .. getNumClasses()-1.
+  /// This requires a compressed map.
+  unsigned operator[](unsigned a) const {
+    assert(NumClasses && "operator[] called before compress()");
+    return EC[a];
+  }
+
+  /// uncompress - Change back to the uncompressed representation that allows
+  /// editing.
+  void uncompress();
+};
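+
+// A usage sketch (illustrative) of the join/compress protocol:
+//
+//   IntEqClasses EC(5);       // classes: {0} {1} {2} {3} {4}
+//   EC.join(0, 3);            // {0,3} {1} {2} {4}
+//   EC.join(1, 2);            // {0,3} {1,2} {4}
+//   EC.compress();            // renumber classes 0 .. getNumClasses()-1
+//   assert(EC.getNumClasses() == 3);
+//   assert(EC[0] == EC[3] && EC[1] == EC[2] && EC[0] != EC[4]);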
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_INTEQCLASSES_H
diff --git a/linux-x64/clang/include/llvm/ADT/IntervalMap.h b/linux-x64/clang/include/llvm/ADT/IntervalMap.h
new file mode 100644
index 0000000..f713668
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/IntervalMap.h
@@ -0,0 +1,2158 @@
+//===- llvm/ADT/IntervalMap.h - A sorted interval map -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a coalescing interval map for small objects.
+//
+// KeyT objects are mapped to ValT objects. Intervals of keys that map to the
+// same value are represented in a compressed form.
+//
+// Iterators provide ordered access to the compressed intervals rather than the
+// individual keys, and insert and erase operations use key intervals as well.
+//
+// Like SmallVector, IntervalMap will store the first N intervals in the map
+// object itself without any allocations. When space is exhausted it switches to
+// a B+-tree representation with very small overhead for small key and value
+// objects.
+//
+// A Traits class specifies how keys are compared. It also allows IntervalMap to
+// work with both closed and half-open intervals.
+//
+// Keys and values are not stored next to each other in a std::pair, so we don't
+// provide such a value_type. Dereferencing iterators only returns the mapped
+// value. The interval bounds are accessible through the start() and stop()
+// iterator methods.
+//
+// IntervalMap is optimized for small key and value objects, 4 or 8 bytes each
+// is the optimal size. For large objects use std::map instead.
+//
+//===----------------------------------------------------------------------===//
+//
+// Synopsis:
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap {
+// public:
+//   typedef KeyT key_type;
+//   typedef ValT mapped_type;
+//   typedef RecyclingAllocator<...> Allocator;
+//   class iterator;
+//   class const_iterator;
+//
+//   explicit IntervalMap(Allocator&);
+//   ~IntervalMap();
+//
+//   bool empty() const;
+//   KeyT start() const;
+//   KeyT stop() const;
+//   ValT lookup(KeyT x, ValT NotFound = ValT()) const;
+//
+//   const_iterator begin() const;
+//   const_iterator end() const;
+//   iterator begin();
+//   iterator end();
+//   const_iterator find(KeyT x) const;
+//   iterator find(KeyT x);
+//
+//   void insert(KeyT a, KeyT b, ValT y);
+//   void clear();
+// };
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap::const_iterator :
+//   public std::iterator<std::bidirectional_iterator_tag, ValT> {
+// public:
+//   bool operator==(const const_iterator &) const;
+//   bool operator!=(const const_iterator &) const;
+//   bool valid() const;
+//
+//   const KeyT &start() const;
+//   const KeyT &stop() const;
+//   const ValT &value() const;
+//   const ValT &operator*() const;
+//   const ValT *operator->() const;
+//
+//   const_iterator &operator++();
+//   const_iterator operator++(int);
+//   const_iterator &operator--();
+//   const_iterator operator--(int);
+//   void goToBegin();
+//   void goToEnd();
+//   void find(KeyT x);
+//   void advanceTo(KeyT x);
+// };
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap::iterator : public const_iterator {
+// public:
+//   void insert(KeyT a, KeyT b, ValT y);
+//   void erase();
+// };
+//
+//===----------------------------------------------------------------------===//
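+
+// Example (illustrative): unsigned keys mapping to char values with the
+// default closed-interval traits. Adjacent intervals carrying equal values
+// coalesce automatically:
+//
+//   IntervalMap<unsigned, char>::Allocator Alloc;
+//   IntervalMap<unsigned, char> M(Alloc);
+//   M.insert(10, 19, 'a');
+//   M.insert(20, 29, 'a');       // coalesces with [10;19] into [10;29]
+//   M.insert(40, 49, 'b');
+//   char V = M.lookup(25, ' ');  // 'a'; looking up 35 yields the ' ' default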
+
+#ifndef LLVM_ADT_INTERVALMAP_H
+#define LLVM_ADT_INTERVALMAP_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/RecyclingAllocator.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <new>
+#include <utility>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+//---                              Key traits                              ---//
+//===----------------------------------------------------------------------===//
+//
+// The IntervalMap works with closed or half-open intervals.
+// Adjacent intervals that map to the same value are coalesced.
+//
+// The IntervalMapInfo traits class is used to determine if a key is contained
+// in an interval, and if two intervals are adjacent so they can be coalesced.
+// The provided implementation works for closed integer intervals, other keys
+// probably need a specialized version.
+//
+// The point x is contained in [a;b] when !startLess(x, a) && !stopLess(b, x).
+//
+// It is assumed that (a;b] half-open intervals are not used, only [a;b) is
+// allowed. This is so that stopLess(a, b) can be used to determine if two
+// intervals overlap.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename T>
+struct IntervalMapInfo {
+  /// startLess - Return true if x is not in [a;b].
+  /// This is x < a both for closed intervals and for [a;b) half-open intervals.
+  static inline bool startLess(const T &x, const T &a) {
+    return x < a;
+  }
+
+  /// stopLess - Return true if x is not in [a;b].
+  /// This is b < x for a closed interval, b <= x for [a;b) half-open intervals.
+  static inline bool stopLess(const T &b, const T &x) {
+    return b < x;
+  }
+
+  /// adjacent - Return true when the intervals [x;a] and [b;y] can coalesce.
+  /// This is a+1 == b for closed intervals, a == b for half-open intervals.
+  static inline bool adjacent(const T &a, const T &b) {
+    return a+1 == b;
+  }
+
+  /// nonEmpty - Return true if [a;b] is non-empty.
+  /// This is a <= b for a closed interval, a < b for [a;b) half-open intervals.
+  static inline bool nonEmpty(const T &a, const T &b) {
+    return a <= b;
+  }
+};
+
+template <typename T>
+struct IntervalMapHalfOpenInfo {
+  /// startLess - Return true if x is not in [a;b).
+  static inline bool startLess(const T &x, const T &a) {
+    return x < a;
+  }
+
+  /// stopLess - Return true if x is not in [a;b).
+  static inline bool stopLess(const T &b, const T &x) {
+    return b <= x;
+  }
+
+  /// adjacent - Return true when the intervals [x;a) and [b;y) can coalesce.
+  static inline bool adjacent(const T &a, const T &b) {
+    return a == b;
+  }
+
+  /// nonEmpty - Return true if [a;b) is non-empty.
+  static inline bool nonEmpty(const T &a, const T &b) {
+    return a < b;
+  }
+};
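+
+// For example (illustrative), with unsigned keys the two trait classes treat
+// the bound pair (10, 20) differently: IntervalMapInfo describes the closed
+// interval [10;20], where 20 is contained and [x;9] is adjacent because
+// 9 + 1 == 10, while IntervalMapHalfOpenInfo describes [10;20), where 20 is
+// excluded and [x;10) is adjacent because the intervals share the bound 10.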
+
+/// IntervalMapImpl - Namespace used for IntervalMap implementation details.
+/// It should be considered private to the implementation.
+namespace IntervalMapImpl {
+
+using IdxPair = std::pair<unsigned,unsigned>;
+
+//===----------------------------------------------------------------------===//
+//---                    IntervalMapImpl::NodeBase                         ---//
+//===----------------------------------------------------------------------===//
+//
+// Both leaf and branch nodes store vectors of pairs.
+// Leaves store ((KeyT, KeyT), ValT) pairs, branches use (NodeRef, KeyT).
+//
+// Keys and values are stored in separate arrays to avoid padding caused by
+// different object alignments. This also helps improve locality of reference
+// when searching the keys.
+//
+// The nodes don't know how many elements they contain - that information is
+// stored elsewhere. Omitting the size field prevents padding and allows a node
+// to fill the allocated cache lines completely.
+//
+// These are typical key and value sizes, the node branching factor (N), and
+// wasted space when nodes are sized to fit in three cache lines (192 bytes):
+//
+//   T1  T2   N Waste  Used by
+//    4   4  24   0    Branch<4> (32-bit pointers)
+//    8   4  16   0    Leaf<4,4>, Branch<4>
+//    8   8  12   0    Leaf<4,8>, Branch<8>
+//   16   4   9  12    Leaf<8,4>
+//   16   8   8   0    Leaf<8,8>
+//
+//===----------------------------------------------------------------------===//
+
+template <typename T1, typename T2, unsigned N>
+class NodeBase {
+public:
+  enum { Capacity = N };
+
+  T1 first[N];
+  T2 second[N];
+
+  /// copy - Copy elements from another node.
+  /// @param Other Node elements are copied from.
+  /// @param i     Beginning of the source range in other.
+  /// @param j     Beginning of the destination range in this.
+  /// @param Count Number of elements to copy.
+  template <unsigned M>
+  void copy(const NodeBase<T1, T2, M> &Other, unsigned i,
+            unsigned j, unsigned Count) {
+    assert(i + Count <= M && "Invalid source range");
+    assert(j + Count <= N && "Invalid dest range");
+    for (unsigned e = i + Count; i != e; ++i, ++j) {
+      first[j]  = Other.first[i];
+      second[j] = Other.second[i];
+    }
+  }
+
+  /// moveLeft - Move elements to the left.
+  /// @param i     Beginning of the source range.
+  /// @param j     Beginning of the destination range.
+  /// @param Count Number of elements to copy.
+  void moveLeft(unsigned i, unsigned j, unsigned Count) {
+    assert(j <= i && "Use moveRight shift elements right");
+    copy(*this, i, j, Count);
+  }
+
+  /// moveRight - Move elements to the right.
+  /// @param i     Beginning of the source range.
+  /// @param j     Beginning of the destination range.
+  /// @param Count Number of elements to copy.
+  void moveRight(unsigned i, unsigned j, unsigned Count) {
+    assert(i <= j && "Use moveLeft shift elements left");
+    assert(j + Count <= N && "Invalid range");
+    while (Count--) {
+      first[j + Count]  = first[i + Count];
+      second[j + Count] = second[i + Count];
+    }
+  }
+
+  /// erase - Erase elements [i;j).
+  /// @param i    Beginning of the range to erase.
+  /// @param j    End of the range. (Exclusive).
+  /// @param Size Number of elements in node.
+  void erase(unsigned i, unsigned j, unsigned Size) {
+    moveLeft(j, i, Size - j);
+  }
+
+  /// erase - Erase element at i.
+  /// @param i    Index of element to erase.
+  /// @param Size Number of elements in node.
+  void erase(unsigned i, unsigned Size) {
+    erase(i, i+1, Size);
+  }
+
+  /// shift - Shift elements [i;size) 1 position to the right.
+  /// @param i    Beginning of the range to move.
+  /// @param Size Number of elements in node.
+  void shift(unsigned i, unsigned Size) {
+    moveRight(i, i + 1, Size - i);
+  }
+
+  /// transferToLeftSib - Transfer elements to a left sibling node.
+  /// @param Size  Number of elements in this.
+  /// @param Sib   Left sibling node.
+  /// @param SSize Number of elements in sib.
+  /// @param Count Number of elements to transfer.
+  void transferToLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize,
+                         unsigned Count) {
+    Sib.copy(*this, 0, SSize, Count);
+    erase(0, Count, Size);
+  }
+
+  /// transferToRightSib - Transfer elements to a right sibling node.
+  /// @param Size  Number of elements in this.
+  /// @param Sib   Right sibling node.
+  /// @param SSize Number of elements in sib.
+  /// @param Count Number of elements to transfer.
+  void transferToRightSib(unsigned Size, NodeBase &Sib, unsigned SSize,
+                          unsigned Count) {
+    Sib.moveRight(0, Count, SSize);
+    Sib.copy(*this, Size-Count, 0, Count);
+  }
+
+  /// adjustFromLeftSib - Adjust the number of elements in this node by moving
+  /// elements to or from a left sibling node.
+  /// @param Size  Number of elements in this.
+  /// @param Sib   Left sibling node.
+  /// @param SSize Number of elements in sib.
+  /// @param Add   The number of elements to add to this node, possibly < 0.
+  /// @return      Number of elements added to this node, possibly negative.
+  int adjustFromLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize, int Add) {
+    if (Add > 0) {
+      // We want to grow, copy from sib.
+      unsigned Count = std::min(std::min(unsigned(Add), SSize), N - Size);
+      Sib.transferToRightSib(SSize, *this, Size, Count);
+      return Count;
+    } else {
+      // We want to shrink, copy to sib.
+      unsigned Count = std::min(std::min(unsigned(-Add), Size), N - SSize);
+      transferToLeftSib(Size, Sib, SSize, Count);
+      return -Count;
+    }
+  }
+};
+
+/// IntervalMapImpl::adjustSiblingSizes - Move elements between sibling nodes.
+/// @param Node  Array of pointers to sibling nodes.
+/// @param Nodes Number of nodes.
+/// @param CurSize Array of current node sizes, will be overwritten.
+/// @param NewSize Array of desired node sizes.
+template <typename NodeT>
+void adjustSiblingSizes(NodeT *Node[], unsigned Nodes,
+                        unsigned CurSize[], const unsigned NewSize[]) {
+  // Bail out early: with Nodes == 0 the signed loop below would start at -1.
+  if (Nodes == 0)
+    return;
+
+  // Move elements right.
+  for (int n = Nodes - 1; n; --n) {
+    if (CurSize[n] == NewSize[n])
+      continue;
+    for (int m = n - 1; m != -1; --m) {
+      int d = Node[n]->adjustFromLeftSib(CurSize[n], *Node[m], CurSize[m],
+                                         NewSize[n] - CurSize[n]);
+      CurSize[m] -= d;
+      CurSize[n] += d;
+      // Keep going if the current node was exhausted.
+      if (CurSize[n] >= NewSize[n])
+          break;
+    }
+  }
+
+  // Move elements left.
+  for (unsigned n = 0; n != Nodes - 1; ++n) {
+    if (CurSize[n] == NewSize[n])
+      continue;
+    for (unsigned m = n + 1; m != Nodes; ++m) {
+      int d = Node[m]->adjustFromLeftSib(CurSize[m], *Node[n], CurSize[n],
+                                        CurSize[n] -  NewSize[n]);
+      CurSize[m] += d;
+      CurSize[n] -= d;
+      // Keep going if the current node was exhausted.
+      if (CurSize[n] >= NewSize[n])
+          break;
+    }
+  }
+
+#ifndef NDEBUG
+  for (unsigned n = 0; n != Nodes; n++)
+    assert(CurSize[n] == NewSize[n] && "Insufficient element shuffle");
+#endif
+}
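+
+// For example (illustrative): with two sibling leaves sized {7, 1} and a
+// desired distribution {4, 4}, the right-moving pass calls
+// Node[1]->adjustFromLeftSib(1, *Node[0], 7, 3), transferring three elements
+// so that both CurSize entries match NewSize before the debug check runs.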
+
+/// IntervalMapImpl::distribute - Compute a new distribution of node elements
+/// after an overflow or underflow. Reserve space for a new element at Position,
+/// and compute the node that will hold Position after redistributing node
+/// elements.
+///
+/// It is required that
+///
+///   Elements == sum(CurSize), and
+///   Elements + Grow <= Nodes * Capacity.
+///
+/// NewSize[] will be filled in such that:
+///
+///   sum(NewSize) == Elements, and
+///   NewSize[i] <= Capacity.
+///
+/// The returned index is the node where Position will go, so:
+///
+///   sum(NewSize[0..idx-1]) <= Position
+///   sum(NewSize[0..idx])   >= Position
+///
+/// The last equality, sum(NewSize[0..idx]) == Position, can only happen when
+/// Grow is set and NewSize[idx] == Capacity-1. The index points to the node
+/// before the one holding the Position'th element where there is room for an
+/// insertion.
+///
+/// @param Nodes    The number of nodes.
+/// @param Elements Total elements in all nodes.
+/// @param Capacity The capacity of each node.
+/// @param CurSize  Array[Nodes] of current node sizes, or NULL.
+/// @param NewSize  Array[Nodes] to receive the new node sizes.
+/// @param Position Insert position.
+/// @param Grow     Reserve space for a new element at Position.
+/// @return         (node, offset) for Position.
+IdxPair distribute(unsigned Nodes, unsigned Elements, unsigned Capacity,
+                   const unsigned *CurSize, unsigned NewSize[],
+                   unsigned Position, bool Grow);
+
+//===----------------------------------------------------------------------===//
+//---                   IntervalMapImpl::NodeSizer                         ---//
+//===----------------------------------------------------------------------===//
+//
+// Compute node sizes from key and value types.
+//
+// The branching factors are chosen to make nodes fit in three cache lines.
+// This may not be possible if keys or values are very large. Such large objects
+// are handled correctly, but a std::map would probably give better performance.
+//
+//===----------------------------------------------------------------------===//
+
+enum {
+  // Cache line size. Most architectures have 32 or 64 byte cache lines.
+  // We use 64 bytes here because it provides good branching factors.
+  Log2CacheLine = 6,
+  CacheLineBytes = 1 << Log2CacheLine,
+  DesiredNodeBytes = 3 * CacheLineBytes
+};
+
+template <typename KeyT, typename ValT>
+struct NodeSizer {
+  enum {
+    // Compute the leaf node branching factor that makes a node fit in three
+    // cache lines. The branching factor must be at least 3, or some B+-tree
+    // balancing algorithms won't work.
+    // LeafSize can't be larger than CacheLineBytes. This is required by the
+    // PointerIntPair used by NodeRef.
+    DesiredLeafSize = DesiredNodeBytes /
+      static_cast<unsigned>(2*sizeof(KeyT)+sizeof(ValT)),
+    MinLeafSize = 3,
+    LeafSize = DesiredLeafSize > MinLeafSize ? DesiredLeafSize : MinLeafSize
+  };
+
+  using LeafBase = NodeBase<std::pair<KeyT, KeyT>, ValT, LeafSize>;
+
+  enum {
+    // Now that we have the leaf branching factor, compute the actual allocation
+    // unit size by rounding up to a whole number of cache lines.
+    AllocBytes = (sizeof(LeafBase) + CacheLineBytes-1) & ~(CacheLineBytes-1),
+
+    // Determine the branching factor for branch nodes.
+    BranchSize = AllocBytes /
+      static_cast<unsigned>(sizeof(KeyT) + sizeof(void*))
+  };
+
+  /// Allocator - The recycling allocator used for both branch and leaf nodes.
+  /// This typedef is very likely to be identical for all IntervalMaps with
+  /// reasonably sized entries, so the same allocator can be shared among
+  /// different kinds of maps.
+  using Allocator =
+      RecyclingAllocator<BumpPtrAllocator, char, AllocBytes, CacheLineBytes>;
+};
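+
+// For example (illustrative): with 4-byte keys and 4-byte values a leaf entry
+// occupies 2*4 + 4 = 12 bytes, so DesiredLeafSize = 192 / 12 = 16. That is the
+// "8 4 16" row of the NodeBase size table above, where T1 is the 8-byte
+// (KeyT, KeyT) pair.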
+
+//===----------------------------------------------------------------------===//
+//---                     IntervalMapImpl::NodeRef                         ---//
+//===----------------------------------------------------------------------===//
+//
+// B+-tree nodes can be leaves or branches, so we need a polymorphic node
+// pointer that can point to both kinds.
+//
+// All nodes are cache line aligned and the low 6 bits of a node pointer are
+// always 0. These bits are used to store the number of elements in the
+// referenced node. Besides saving space, placing node sizes in the parents
+// allows tree balancing algorithms to run without faulting cache lines for
+// nodes
+// that may not need to be modified.
+//
+// A NodeRef doesn't know whether it references a leaf node or a branch node.
+// It is the responsibility of the caller to use the correct types.
+//
+// Nodes are never supposed to be empty, and it is invalid to store a node size
+// of 0 in a NodeRef. The valid range of sizes is 1-64.
+//
+//===----------------------------------------------------------------------===//
+
+class NodeRef {
+  struct CacheAlignedPointerTraits {
+    static inline void *getAsVoidPointer(void *P) { return P; }
+    static inline void *getFromVoidPointer(void *P) { return P; }
+    enum { NumLowBitsAvailable = Log2CacheLine };
+  };
+  PointerIntPair<void*, Log2CacheLine, unsigned, CacheAlignedPointerTraits> pip;
+
+public:
+  /// NodeRef - Create a null ref.
+  NodeRef() = default;
+
+  /// operator bool - Detect a null ref.
+  explicit operator bool() const { return pip.getOpaqueValue(); }
+
+  /// NodeRef - Create a reference to the node p with n elements.
+  template <typename NodeT>
+  NodeRef(NodeT *p, unsigned n) : pip(p, n - 1) {
+    assert(n <= NodeT::Capacity && "Size too big for node");
+  }
+
+  /// size - Return the number of elements in the referenced node.
+  unsigned size() const { return pip.getInt() + 1; }
+
+  /// setSize - Update the node size.
+  void setSize(unsigned n) { pip.setInt(n - 1); }
+
+  /// subtree - Access the i'th subtree reference in a branch node.
+  /// This depends on branch nodes storing the NodeRef array as their first
+  /// member.
+  NodeRef &subtree(unsigned i) const {
+    return reinterpret_cast<NodeRef*>(pip.getPointer())[i];
+  }
+
+  /// get - Dereference as a NodeT reference.
+  template <typename NodeT>
+  NodeT &get() const {
+    return *reinterpret_cast<NodeT*>(pip.getPointer());
+  }
+
+  bool operator==(const NodeRef &RHS) const {
+    if (pip == RHS.pip)
+      return true;
+    assert(pip.getPointer() != RHS.pip.getPointer() && "Inconsistent NodeRefs");
+    return false;
+  }
+
+  bool operator!=(const NodeRef &RHS) const {
+    return !operator==(RHS);
+  }
+};
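+
+// For example (illustrative): nodes are allocated on CacheLineBytes (64-byte)
+// boundaries, so the low Log2CacheLine bits of a node pointer are always zero.
+// NodeRef stores "size - 1" in those six bits, which is exactly why the valid
+// size range is 1-64 and a size of 0 cannot be represented.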
+
+//===----------------------------------------------------------------------===//
+//---                      IntervalMapImpl::LeafNode                       ---//
+//===----------------------------------------------------------------------===//
+//
+// Leaf nodes store up to N disjoint intervals with corresponding values.
+//
+// The intervals are kept sorted and fully coalesced so there are no adjacent
+// intervals mapping to the same value.
+//
+// These constraints are always satisfied:
+//
+// - Traits::stopLess(start(i), stop(i))    - Non-empty, sane intervals.
+//
+// - Traits::stopLess(stop(i), start(i + 1)) - Sorted.
+//
+// - value(i) != value(i + 1) || !Traits::adjacent(stop(i), start(i + 1))
+//                                          - Fully coalesced.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class LeafNode : public NodeBase<std::pair<KeyT, KeyT>, ValT, N> {
+public:
+  const KeyT &start(unsigned i) const { return this->first[i].first; }
+  const KeyT &stop(unsigned i) const { return this->first[i].second; }
+  const ValT &value(unsigned i) const { return this->second[i]; }
+
+  KeyT &start(unsigned i) { return this->first[i].first; }
+  KeyT &stop(unsigned i) { return this->first[i].second; }
+  ValT &value(unsigned i) { return this->second[i]; }
+
+  /// findFrom - Find the first interval after i that may contain x.
+  /// @param i    Starting index for the search.
+  /// @param Size Number of elements in node.
+  /// @param x    Key to search for.
+  /// @return     First index with !stopLess(key[i].stop, x), or size.
+  ///             This is the first interval that can possibly contain x.
+  unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
+    assert(i <= Size && Size <= N && "Bad indices");
+    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+           "Index is past the needed point");
+    while (i != Size && Traits::stopLess(stop(i), x)) ++i;
+    return i;
+  }
+
+  /// safeFind - Find an interval that is known to exist. This is the same as
+  /// findFrom except it is assumed that x is at least within range of the last
+  /// interval.
+  /// @param i Starting index for the search.
+  /// @param x Key to search for.
+  /// @return  First index with !stopLess(key[i].stop, x), never size.
+  ///          This is the first interval that can possibly contain x.
+  unsigned safeFind(unsigned i, KeyT x) const {
+    assert(i < N && "Bad index");
+    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+           "Index is past the needed point");
+    while (Traits::stopLess(stop(i), x)) ++i;
+    assert(i < N && "Unsafe intervals");
+    return i;
+  }
+
+  /// safeLookup - Lookup mapped value for a safe key.
+  /// It is assumed that x is within range of the last entry.
+  /// @param x        Key to search for.
+  /// @param NotFound Value to return if x is not in any interval.
+  /// @return         The mapped value at x or NotFound.
+  ValT safeLookup(KeyT x, ValT NotFound) const {
+    unsigned i = safeFind(0, x);
+    return Traits::startLess(x, start(i)) ? NotFound : value(i);
+  }
+
+  unsigned insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y);
+};
+
+/// insertFrom - Add mapping of [a;b] to y if possible, coalescing as much as
+/// possible. This may cause the node to grow by 1, or it may cause the node
+/// to shrink because of coalescing.
+/// @param Pos  Starting index = insertFrom(0, size, a)
+/// @param Size Number of elements in node.
+/// @param a    Interval start.
+/// @param b    Interval stop.
+/// @param y    Value to be mapped.
+/// @return     The new number of elements in the node, or Capacity+1 on
+///             overflow; Pos is updated to the actual insert position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+unsigned LeafNode<KeyT, ValT, N, Traits>::
+insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y) {
+  unsigned i = Pos;
+  assert(i <= Size && Size <= N && "Invalid index");
+  assert(!Traits::stopLess(b, a) && "Invalid interval");
+
+  // Verify the findFrom invariant.
+  assert((i == 0 || Traits::stopLess(stop(i - 1), a)));
+  assert((i == Size || !Traits::stopLess(stop(i), a)));
+  assert((i == Size || Traits::stopLess(b, start(i))) && "Overlapping insert");
+
+  // Coalesce with previous interval.
+  if (i && value(i - 1) == y && Traits::adjacent(stop(i - 1), a)) {
+    Pos = i - 1;
+    // Also coalesce with next interval?
+    if (i != Size && value(i) == y && Traits::adjacent(b, start(i))) {
+      stop(i - 1) = stop(i);
+      this->erase(i, Size);
+      return Size - 1;
+    }
+    stop(i - 1) = b;
+    return Size;
+  }
+
+  // Detect overflow.
+  if (i == N)
+    return N + 1;
+
+  // Add new interval at end.
+  if (i == Size) {
+    start(i) = a;
+    stop(i) = b;
+    value(i) = y;
+    return Size + 1;
+  }
+
+  // Try to coalesce with following interval.
+  if (value(i) == y && Traits::adjacent(b, start(i))) {
+    start(i) = a;
+    return Size;
+  }
+
+  // We must insert before i. Detect overflow.
+  if (Size == N)
+    return N + 1;
+
+  // Insert before i.
+  this->shift(i, Size);
+  start(i) = a;
+  stop(i) = b;
+  value(i) = y;
+  return Size + 1;
+}
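+
+// For example (unsigned keys, default traits): inserting [21;29] -> 'a' into a
+// leaf that holds [10;20] -> 'a' and [30;40] -> 'a' coalesces in both
+// directions, leaving the single entry [10;40] -> 'a' and returning Size - 1.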
+
+//===----------------------------------------------------------------------===//
+//---                   IntervalMapImpl::BranchNode                        ---//
+//===----------------------------------------------------------------------===//
+//
+// A branch node stores references to 1--N subtrees all of the same height.
+//
+// The key array in a branch node holds the rightmost stop key of each subtree.
+// It is redundant to store the last stop key since it can be found in the
+// parent node, but doing so makes tree balancing a lot simpler.
+//
+// It is unusual for a branch node to only have one subtree, but it can happen
+// in the root node if it is smaller than the normal nodes.
+//
+// When all of the leaf nodes from all the subtrees are concatenated, they must
+// satisfy the same constraints as a single leaf node. They must be sorted,
+// sane, and fully coalesced.
+//
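+// For example, a branch node over three subtrees whose rightmost intervals
+// stop at 40, 90, and 120 stores the keys {40, 90, 120}; safeLookup(75)
+// descends into the second subtree because 40 < 75 <= 90.
+//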
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class BranchNode : public NodeBase<NodeRef, KeyT, N> {
+public:
+  const KeyT &stop(unsigned i) const { return this->second[i]; }
+  const NodeRef &subtree(unsigned i) const { return this->first[i]; }
+
+  KeyT &stop(unsigned i) { return this->second[i]; }
+  NodeRef &subtree(unsigned i) { return this->first[i]; }
+
+  /// findFrom - Find the first subtree after i that may contain x.
+  /// @param i    Starting index for the search.
+  /// @param Size Number of elements in node.
+  /// @param x    Key to search for.
+  /// @return     First index with !stopLess(key[i], x), or size.
+  ///             This is the first subtree that can possibly contain x.
+  unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
+    assert(i <= Size && Size <= N && "Bad indices");
+    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+           "Index to findFrom is past the needed point");
+    while (i != Size && Traits::stopLess(stop(i), x)) ++i;
+    return i;
+  }
+
+  /// safeFind - Find a subtree that is known to exist. This is the same as
+  /// findFrom except it is assumed that x is in range.
+  /// @param i Starting index for the search.
+  /// @param x Key to search for.
+  /// @return  First index with !stopLess(key[i], x), never size.
+  ///          This is the first subtree that can possibly contain x.
+  unsigned safeFind(unsigned i, KeyT x) const {
+    assert(i < N && "Bad index");
+    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+           "Index is past the needed point");
+    while (Traits::stopLess(stop(i), x)) ++i;
+    assert(i < N && "Unsafe intervals");
+    return i;
+  }
+
+  /// safeLookup - Get the subtree containing x, assuming that x is in range.
+  /// @param x Key to search for.
+  /// @return  Subtree containing x.
+  NodeRef safeLookup(KeyT x) const {
+    return subtree(safeFind(0, x));
+  }
+
+  /// insert - Insert a new (subtree, stop) pair.
+  /// @param i    Insert position, following entries will be shifted.
+  /// @param Size Number of elements in node.
+  /// @param Node Subtree to insert.
+  /// @param Stop Last key in subtree.
+  void insert(unsigned i, unsigned Size, NodeRef Node, KeyT Stop) {
+    assert(Size < N && "branch node overflow");
+    assert(i <= Size && "Bad insert position");
+    this->shift(i, Size);
+    subtree(i) = Node;
+    stop(i) = Stop;
+  }
+};
+
+//===----------------------------------------------------------------------===//
+//---                         IntervalMapImpl::Path                        ---//
+//===----------------------------------------------------------------------===//
+//
+// A Path is used by iterators to represent a position in a B+-tree, and the
+// path to get there from the root.
+//
+// The Path class also contains the tree navigation code that doesn't have to
+// be templatized.
+//
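+// For example, in a tree of height 2 a full path has three entries: path[0]
+// addresses the root branch node, path[1] an external branch node, and
+// path[2] the current leaf; offset(Level) selects the subtree followed at
+// each level.
+//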
+//===----------------------------------------------------------------------===//
+
+class Path {
+  /// Entry - Each step in the path is a node pointer and an offset into that
+  /// node.
+  struct Entry {
+    void *node;
+    unsigned size;
+    unsigned offset;
+
+    Entry(void *Node, unsigned Size, unsigned Offset)
+      : node(Node), size(Size), offset(Offset) {}
+
+    Entry(NodeRef Node, unsigned Offset)
+      : node(&Node.subtree(0)), size(Node.size()), offset(Offset) {}
+
+    NodeRef &subtree(unsigned i) const {
+      return reinterpret_cast<NodeRef*>(node)[i];
+    }
+  };
+
+  /// path - The path entries, path[0] is the root node, path.back() is a leaf.
+  SmallVector<Entry, 4> path;
+
+public:
+  // Node accessors.
+  template <typename NodeT> NodeT &node(unsigned Level) const {
+    return *reinterpret_cast<NodeT*>(path[Level].node);
+  }
+  unsigned size(unsigned Level) const { return path[Level].size; }
+  unsigned offset(unsigned Level) const { return path[Level].offset; }
+  unsigned &offset(unsigned Level) { return path[Level].offset; }
+
+  // Leaf accessors.
+  template <typename NodeT> NodeT &leaf() const {
+    return *reinterpret_cast<NodeT*>(path.back().node);
+  }
+  unsigned leafSize() const { return path.back().size; }
+  unsigned leafOffset() const { return path.back().offset; }
+  unsigned &leafOffset() { return path.back().offset; }
+
+  /// valid - Return true if path is at a valid node, not at end().
+  bool valid() const {
+    return !path.empty() && path.front().offset < path.front().size;
+  }
+
+  /// height - Return the height of the tree corresponding to this path.
+  /// This matches map->height in a full path.
+  unsigned height() const { return path.size() - 1; }
+
+  /// subtree - Get the subtree referenced from Level. When the path is
+  /// consistent, node(Level + 1) == subtree(Level).
+  /// @param Level 0..height-1. The leaves have no subtrees.
+  NodeRef &subtree(unsigned Level) const {
+    return path[Level].subtree(path[Level].offset);
+  }
+
+  /// reset - Reset cached information about node(Level) from subtree(Level - 1).
+  /// @param Level 1..height. The node to update after the parent node changed.
+  void reset(unsigned Level) {
+    path[Level] = Entry(subtree(Level - 1), offset(Level));
+  }
+
+  /// push - Add entry to path.
+  /// @param Node Node to add, should be subtree(path.size()-1).
+  /// @param Offset Offset into Node.
+  void push(NodeRef Node, unsigned Offset) {
+    path.push_back(Entry(Node, Offset));
+  }
+
+  /// pop - Remove the last path entry.
+  void pop() {
+    path.pop_back();
+  }
+
+  /// setSize - Set the size of a node both in the path and in the tree.
+  /// @param Level 0..height. Note that setting the root size won't change
+  ///              map->rootSize.
+  /// @param Size New node size.
+  void setSize(unsigned Level, unsigned Size) {
+    path[Level].size = Size;
+    if (Level)
+      subtree(Level - 1).setSize(Size);
+  }
+
+  /// setRoot - Clear the path and set a new root node.
+  /// @param Node New root node.
+  /// @param Size New root size.
+  /// @param Offset Offset into root node.
+  void setRoot(void *Node, unsigned Size, unsigned Offset) {
+    path.clear();
+    path.push_back(Entry(Node, Size, Offset));
+  }
+
+  /// replaceRoot - Replace the current root node with two new entries after the
+  /// tree height has increased.
+  /// @param Root The new root node.
+  /// @param Size Number of entries in the new root.
+  /// @param Offsets Offsets into the root and first branch nodes.
+  void replaceRoot(void *Root, unsigned Size, IdxPair Offsets);
+
+  /// getLeftSibling - Get the left sibling node at Level, or a null NodeRef.
+  /// @param Level Get the sibling to node(Level).
+  /// @return Left sibling, or NodeRef().
+  NodeRef getLeftSibling(unsigned Level) const;
+
+  /// moveLeft - Move path to the left sibling at Level. Leave nodes below Level
+  /// unaltered.
+  /// @param Level Move node(Level).
+  void moveLeft(unsigned Level);
+
+  /// fillLeft - Grow path to Height by taking leftmost branches.
+  /// @param Height The target height.
+  void fillLeft(unsigned Height) {
+    while (height() < Height)
+      push(subtree(height()), 0);
+  }
+
+  /// getRightSibling - Get the right sibling node at Level, or a null NodeRef.
+  /// @param Level Get the sibling to node(Level).
+  /// @return Right sibling, or NodeRef().
+  NodeRef getRightSibling(unsigned Level) const;
+
+  /// moveRight - Move path to the right sibling at Level. Leave nodes below
+  /// Level unaltered.
+  /// @param Level Move node(Level).
+  void moveRight(unsigned Level);
+
+  /// atBegin - Return true if path is at begin().
+  bool atBegin() const {
+    for (unsigned i = 0, e = path.size(); i != e; ++i)
+      if (path[i].offset != 0)
+        return false;
+    return true;
+  }
+
+  /// atLastEntry - Return true if the path is at the last entry of the node at
+  /// Level.
+  /// @param Level Node to examine.
+  bool atLastEntry(unsigned Level) const {
+    return path[Level].offset == path[Level].size - 1;
+  }
+
+  /// legalizeForInsert - Prepare the path for an insertion at Level. When the
+  /// path is at end(), node(Level) may not be a legal node. legalizeForInsert
+  /// ensures that node(Level) is real by moving back to the last node at Level,
+  /// and setting offset(Level) to size(Level) if required.
+  /// @param Level The level where an insertion is about to take place.
+  void legalizeForInsert(unsigned Level) {
+    if (valid())
+      return;
+    moveLeft(Level);
+    ++path[Level].offset;
+  }
+};
+
+} // end namespace IntervalMapImpl
+
+//===----------------------------------------------------------------------===//
+//---                          IntervalMap                                ----//
+//===----------------------------------------------------------------------===//
+
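+// A minimal usage sketch (unsigned keys mapped to char values, using the
+// allocator type the map itself provides):
+//
+//   typedef IntervalMap<unsigned, char> Map;
+//   Map::Allocator Alloc;
+//   Map M(Alloc);
+//   M.insert(10, 20, 'a');     // [10;20] -> 'a'
+//   M.insert(21, 30, 'a');     // Coalesces into [10;30] -> 'a'.
+//   char c = M.lookup(25);     // 'a'
+//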
+template <typename KeyT, typename ValT,
+          unsigned N = IntervalMapImpl::NodeSizer<KeyT, ValT>::LeafSize,
+          typename Traits = IntervalMapInfo<KeyT>>
+class IntervalMap {
+  using Sizer = IntervalMapImpl::NodeSizer<KeyT, ValT>;
+  using Leaf = IntervalMapImpl::LeafNode<KeyT, ValT, Sizer::LeafSize, Traits>;
+  using Branch =
+      IntervalMapImpl::BranchNode<KeyT, ValT, Sizer::BranchSize, Traits>;
+  using RootLeaf = IntervalMapImpl::LeafNode<KeyT, ValT, N, Traits>;
+  using IdxPair = IntervalMapImpl::IdxPair;
+
+  // The RootLeaf capacity is given as a template parameter. We must compute the
+  // corresponding RootBranch capacity.
+  enum {
+    DesiredRootBranchCap = (sizeof(RootLeaf) - sizeof(KeyT)) /
+      (sizeof(KeyT) + sizeof(IntervalMapImpl::NodeRef)),
+    RootBranchCap = DesiredRootBranchCap ? DesiredRootBranchCap : 1
+  };
+
+  using RootBranch =
+      IntervalMapImpl::BranchNode<KeyT, ValT, RootBranchCap, Traits>;
+
+  // When branched, we store a global start key as well as the branch node.
+  struct RootBranchData {
+    KeyT start;
+    RootBranch node;
+  };
+
+public:
+  using Allocator = typename Sizer::Allocator;
+  using KeyType = KeyT;
+  using ValueType = ValT;
+  using KeyTraits = Traits;
+
+private:
+  // The root data is either a RootLeaf or a RootBranchData instance.
+  AlignedCharArrayUnion<RootLeaf, RootBranchData> data;
+
+  // Tree height.
+  // 0: Leaves in root.
+  // 1: Root points to leaf.
+  // 2: root->branch->leaf ...
+  unsigned height;
+
+  // Number of entries in the root node.
+  unsigned rootSize;
+
+  // Allocator used for creating external nodes.
+  Allocator &allocator;
+
+  /// dataAs - Represent data as a node type without breaking aliasing rules.
+  template <typename T>
+  T &dataAs() const {
+    union {
+      const char *d;
+      T *t;
+    } u;
+    u.d = data.buffer;
+    return *u.t;
+  }
+
+  const RootLeaf &rootLeaf() const {
+    assert(!branched() && "Cannot acces leaf data in branched root");
+    return dataAs<RootLeaf>();
+  }
+  RootLeaf &rootLeaf() {
+    assert(!branched() && "Cannot acces leaf data in branched root");
+    return dataAs<RootLeaf>();
+  }
+
+  RootBranchData &rootBranchData() const {
+    assert(branched() && "Cannot access branch data in non-branched root");
+    return dataAs<RootBranchData>();
+  }
+  RootBranchData &rootBranchData() {
+    assert(branched() && "Cannot access branch data in non-branched root");
+    return dataAs<RootBranchData>();
+  }
+
+  const RootBranch &rootBranch() const { return rootBranchData().node; }
+  RootBranch &rootBranch()             { return rootBranchData().node; }
+  KeyT rootBranchStart() const { return rootBranchData().start; }
+  KeyT &rootBranchStart()      { return rootBranchData().start; }
+
+  template <typename NodeT> NodeT *newNode() {
+    return new(allocator.template Allocate<NodeT>()) NodeT();
+  }
+
+  template <typename NodeT> void deleteNode(NodeT *P) {
+    P->~NodeT();
+    allocator.Deallocate(P);
+  }
+
+  IdxPair branchRoot(unsigned Position);
+  IdxPair splitRoot(unsigned Position);
+
+  void switchRootToBranch() {
+    rootLeaf().~RootLeaf();
+    height = 1;
+    new (&rootBranchData()) RootBranchData();
+  }
+
+  void switchRootToLeaf() {
+    rootBranchData().~RootBranchData();
+    height = 0;
+    new(&rootLeaf()) RootLeaf();
+  }
+
+  bool branched() const { return height > 0; }
+
+  ValT treeSafeLookup(KeyT x, ValT NotFound) const;
+  void visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef,
+                  unsigned Level));
+  void deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level);
+
+public:
+  explicit IntervalMap(Allocator &a) : height(0), rootSize(0), allocator(a) {
+    assert((uintptr_t(data.buffer) & (alignof(RootLeaf) - 1)) == 0 &&
+           "Insufficient alignment");
+    new(&rootLeaf()) RootLeaf();
+  }
+
+  ~IntervalMap() {
+    clear();
+    rootLeaf().~RootLeaf();
+  }
+
+  /// empty -  Return true when no intervals are mapped.
+  bool empty() const {
+    return rootSize == 0;
+  }
+
+  /// start - Return the smallest mapped key in a non-empty map.
+  KeyT start() const {
+    assert(!empty() && "Empty IntervalMap has no start");
+    return !branched() ? rootLeaf().start(0) : rootBranchStart();
+  }
+
+  /// stop - Return the largest mapped key in a non-empty map.
+  KeyT stop() const {
+    assert(!empty() && "Empty IntervalMap has no stop");
+    return !branched() ? rootLeaf().stop(rootSize - 1) :
+                         rootBranch().stop(rootSize - 1);
+  }
+
+  /// lookup - Return the mapped value at x or NotFound.
+  ValT lookup(KeyT x, ValT NotFound = ValT()) const {
+    if (empty() || Traits::startLess(x, start()) || Traits::stopLess(stop(), x))
+      return NotFound;
+    return branched() ? treeSafeLookup(x, NotFound) :
+                        rootLeaf().safeLookup(x, NotFound);
+  }
+
+  /// insert - Add a mapping of [a;b] to y, coalesce with adjacent intervals.
+  /// It is assumed that no key in the interval is mapped to another value, but
+  /// overlapping intervals already mapped to y will be coalesced.
+  void insert(KeyT a, KeyT b, ValT y) {
+    if (branched() || rootSize == RootLeaf::Capacity)
+      return find(a).insert(a, b, y);
+
+    // Easy insert into root leaf.
+    unsigned p = rootLeaf().findFrom(0, rootSize, a);
+    rootSize = rootLeaf().insertFrom(p, rootSize, a, b, y);
+  }
+
+  /// clear - Remove all entries.
+  void clear();
+
+  class const_iterator;
+  class iterator;
+  friend class const_iterator;
+  friend class iterator;
+
+  const_iterator begin() const {
+    const_iterator I(*this);
+    I.goToBegin();
+    return I;
+  }
+
+  iterator begin() {
+    iterator I(*this);
+    I.goToBegin();
+    return I;
+  }
+
+  const_iterator end() const {
+    const_iterator I(*this);
+    I.goToEnd();
+    return I;
+  }
+
+  iterator end() {
+    iterator I(*this);
+    I.goToEnd();
+    return I;
+  }
+
+  /// find - Return an iterator pointing to the first interval ending at or
+  /// after x, or end().
+  const_iterator find(KeyT x) const {
+    const_iterator I(*this);
+    I.find(x);
+    return I;
+  }
+
+  iterator find(KeyT x) {
+    iterator I(*this);
+    I.find(x);
+    return I;
+  }
+};
+
+/// treeSafeLookup - Return the mapped value at x or NotFound, assuming a
+/// branched root.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+ValT IntervalMap<KeyT, ValT, N, Traits>::
+treeSafeLookup(KeyT x, ValT NotFound) const {
+  assert(branched() && "treeLookup assumes a branched root");
+
+  IntervalMapImpl::NodeRef NR = rootBranch().safeLookup(x);
+  for (unsigned h = height-1; h; --h)
+    NR = NR.get<Branch>().safeLookup(x);
+  return NR.get<Leaf>().safeLookup(x, NotFound);
+}
+
+// branchRoot - Switch from a leaf root to a branched root.
+// Return the new (root offset, node offset) corresponding to Position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
+branchRoot(unsigned Position) {
+  using namespace IntervalMapImpl;
+  // How many external leaf nodes to hold RootLeaf+1?
+  const unsigned Nodes = RootLeaf::Capacity / Leaf::Capacity + 1;
+
+  // Compute element distribution among new nodes.
+  unsigned size[Nodes];
+  IdxPair NewOffset(0, Position);
+
+  // It is very common for the root node to be smaller than external nodes.
+  if (Nodes == 1)
+    size[0] = rootSize;
+  else
+    NewOffset = distribute(Nodes, rootSize, Leaf::Capacity,  nullptr, size,
+                           Position, true);
+
+  // Allocate new nodes.
+  unsigned pos = 0;
+  NodeRef node[Nodes];
+  for (unsigned n = 0; n != Nodes; ++n) {
+    Leaf *L = newNode<Leaf>();
+    L->copy(rootLeaf(), pos, 0, size[n]);
+    node[n] = NodeRef(L, size[n]);
+    pos += size[n];
+  }
+
+  // Destroy the old leaf node, construct branch node instead.
+  switchRootToBranch();
+  for (unsigned n = 0; n != Nodes; ++n) {
+    rootBranch().stop(n) = node[n].template get<Leaf>().stop(size[n]-1);
+    rootBranch().subtree(n) = node[n];
+  }
+  rootBranchStart() = node[0].template get<Leaf>().start(0);
+  rootSize = Nodes;
+  return NewOffset;
+}
+
+// splitRoot - Split the current BranchRoot into multiple Branch nodes.
+// Return the new (root offset, node offset) corresponding to Position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
+splitRoot(unsigned Position) {
+  using namespace IntervalMapImpl;
+  // How many external branch nodes to hold RootBranch+1?
+  const unsigned Nodes = RootBranch::Capacity / Branch::Capacity + 1;
+
+  // Compute element distribution among new nodes.
+  unsigned Size[Nodes];
+  IdxPair NewOffset(0, Position);
+
+  // It is very common for the root node to be smaller than external nodes.
+  if (Nodes == 1)
+    Size[0] = rootSize;
+  else
+    NewOffset = distribute(Nodes, rootSize, Leaf::Capacity,  nullptr, Size,
+                           Position, true);
+
+  // Allocate new nodes.
+  unsigned Pos = 0;
+  NodeRef Node[Nodes];
+  for (unsigned n = 0; n != Nodes; ++n) {
+    Branch *B = newNode<Branch>();
+    B->copy(rootBranch(), Pos, 0, Size[n]);
+    Node[n] = NodeRef(B, Size[n]);
+    Pos += Size[n];
+  }
+
+  for (unsigned n = 0; n != Nodes; ++n) {
+    rootBranch().stop(n) = Node[n].template get<Branch>().stop(Size[n]-1);
+    rootBranch().subtree(n) = Node[n];
+  }
+  rootSize = Nodes;
+  ++height;
+  return NewOffset;
+}
+
+/// visitNodes - Visit each external node.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef, unsigned Height)) {
+  if (!branched())
+    return;
+  SmallVector<IntervalMapImpl::NodeRef, 4> Refs, NextRefs;
+
+  // Collect level 0 nodes from the root.
+  for (unsigned i = 0; i != rootSize; ++i)
+    Refs.push_back(rootBranch().subtree(i));
+
+  // Visit all branch nodes.
+  for (unsigned h = height - 1; h; --h) {
+    for (unsigned i = 0, e = Refs.size(); i != e; ++i) {
+      for (unsigned j = 0, s = Refs[i].size(); j != s; ++j)
+        NextRefs.push_back(Refs[i].subtree(j));
+      (this->*f)(Refs[i], h);
+    }
+    Refs.clear();
+    Refs.swap(NextRefs);
+  }
+
+  // Visit all leaf nodes.
+  for (unsigned i = 0, e = Refs.size(); i != e; ++i)
+    (this->*f)(Refs[i], 0);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level) {
+  if (Level)
+    deleteNode(&Node.get<Branch>());
+  else
+    deleteNode(&Node.get<Leaf>());
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+clear() {
+  if (branched()) {
+    visitNodes(&IntervalMap::deleteNode);
+    switchRootToLeaf();
+  }
+  rootSize = 0;
+}
+
+//===----------------------------------------------------------------------===//
+//---                   IntervalMap::const_iterator                       ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class IntervalMap<KeyT, ValT, N, Traits>::const_iterator :
+  public std::iterator<std::bidirectional_iterator_tag, ValT> {
+
+protected:
+  friend class IntervalMap;
+
+  // The map referred to.
+  IntervalMap *map = nullptr;
+
+  // We store a full path from the root to the current position.
+  // The path may be partially filled, but never between iterator calls.
+  IntervalMapImpl::Path path;
+
+  explicit const_iterator(const IntervalMap &map) :
+    map(const_cast<IntervalMap*>(&map)) {}
+
+  bool branched() const {
+    assert(map && "Invalid iterator");
+    return map->branched();
+  }
+
+  void setRoot(unsigned Offset) {
+    if (branched())
+      path.setRoot(&map->rootBranch(), map->rootSize, Offset);
+    else
+      path.setRoot(&map->rootLeaf(), map->rootSize, Offset);
+  }
+
+  void pathFillFind(KeyT x);
+  void treeFind(KeyT x);
+  void treeAdvanceTo(KeyT x);
+
+  /// unsafeStart - Writable access to start() for iterator.
+  KeyT &unsafeStart() const {
+    assert(valid() && "Cannot access invalid iterator");
+    return branched() ? path.leaf<Leaf>().start(path.leafOffset()) :
+                        path.leaf<RootLeaf>().start(path.leafOffset());
+  }
+
+  /// unsafeStop - Writable access to stop() for iterator.
+  KeyT &unsafeStop() const {
+    assert(valid() && "Cannot access invalid iterator");
+    return branched() ? path.leaf<Leaf>().stop(path.leafOffset()) :
+                        path.leaf<RootLeaf>().stop(path.leafOffset());
+  }
+
+  /// unsafeValue - Writable access to value() for iterator.
+  ValT &unsafeValue() const {
+    assert(valid() && "Cannot access invalid iterator");
+    return branched() ? path.leaf<Leaf>().value(path.leafOffset()) :
+                        path.leaf<RootLeaf>().value(path.leafOffset());
+  }
+
+public:
+  /// const_iterator - Create an iterator that isn't pointing anywhere.
+  const_iterator() = default;
+
+  /// setMap - Change the map iterated over. This call must be followed by a
+  /// call to goToBegin(), goToEnd(), or find().
+  void setMap(const IntervalMap &m) { map = const_cast<IntervalMap*>(&m); }
+
+  /// valid - Return true if the current position is valid, false for end().
+  bool valid() const { return path.valid(); }
+
+  /// atBegin - Return true if the current position is the first map entry.
+  bool atBegin() const { return path.atBegin(); }
+
+  /// start - Return the beginning of the current interval.
+  const KeyT &start() const { return unsafeStart(); }
+
+  /// stop - Return the end of the current interval.
+  const KeyT &stop() const { return unsafeStop(); }
+
+  /// value - Return the mapped value at the current interval.
+  const ValT &value() const { return unsafeValue(); }
+
+  const ValT &operator*() const { return value(); }
+
+  bool operator==(const const_iterator &RHS) const {
+    assert(map == RHS.map && "Cannot compare iterators from different maps");
+    if (!valid())
+      return !RHS.valid();
+    if (path.leafOffset() != RHS.path.leafOffset())
+      return false;
+    return &path.template leaf<Leaf>() == &RHS.path.template leaf<Leaf>();
+  }
+
+  bool operator!=(const const_iterator &RHS) const {
+    return !operator==(RHS);
+  }
+
+  /// goToBegin - Move to the first interval in map.
+  void goToBegin() {
+    setRoot(0);
+    if (branched())
+      path.fillLeft(map->height);
+  }
+
+  /// goToEnd - Move beyond the last interval in map.
+  void goToEnd() {
+    setRoot(map->rootSize);
+  }
+
+  /// preincrement - move to the next interval.
+  const_iterator &operator++() {
+    assert(valid() && "Cannot increment end()");
+    if (++path.leafOffset() == path.leafSize() && branched())
+      path.moveRight(map->height);
+    return *this;
+  }
+
+  /// postincrement - Don't do that!
+  const_iterator operator++(int) {
+    const_iterator tmp = *this;
+    operator++();
+    return tmp;
+  }
+
+  /// predecrement - move to the previous interval.
+  const_iterator &operator--() {
+    if (path.leafOffset() && (valid() || !branched()))
+      --path.leafOffset();
+    else
+      path.moveLeft(map->height);
+    return *this;
+  }
+
+  /// postdecrement - Don't do that!
+  const_iterator operator--(int) {
+    const_iterator tmp = *this;
+    operator--();
+    return tmp;
+  }
+
+  /// find - Move to the first interval with stop >= x, or end().
+  /// This is a full search from the root, the current position is ignored.
+  void find(KeyT x) {
+    if (branched())
+      treeFind(x);
+    else
+      setRoot(map->rootLeaf().findFrom(0, map->rootSize, x));
+  }
+
+  /// advanceTo - Move to the first interval with stop >= x, or end().
+  /// The search is started from the current position, and no earlier positions
+  /// can be found. This is much faster than find() for small moves.
+  void advanceTo(KeyT x) {
+    if (!valid())
+      return;
+    if (branched())
+      treeAdvanceTo(x);
+    else
+      path.leafOffset() =
+        map->rootLeaf().findFrom(path.leafOffset(), map->rootSize, x);
+  }
+};
+
+/// pathFillFind - Complete path by searching for x.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::pathFillFind(KeyT x) {
+  IntervalMapImpl::NodeRef NR = path.subtree(path.height());
+  for (unsigned i = map->height - path.height() - 1; i; --i) {
+    unsigned p = NR.get<Branch>().safeFind(0, x);
+    path.push(NR, p);
+    NR = NR.subtree(p);
+  }
+  path.push(NR, NR.get<Leaf>().safeFind(0, x));
+}
+
+/// treeFind - Find in a branched tree.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::treeFind(KeyT x) {
+  setRoot(map->rootBranch().findFrom(0, map->rootSize, x));
+  if (valid())
+    pathFillFind(x);
+}
+
+/// treeAdvanceTo - Find position after the current one.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::treeAdvanceTo(KeyT x) {
+  // Can we stay on the same leaf node?
+  if (!Traits::stopLess(path.leaf<Leaf>().stop(path.leafSize() - 1), x)) {
+    path.leafOffset() = path.leaf<Leaf>().safeFind(path.leafOffset(), x);
+    return;
+  }
+
+  // Drop the current leaf.
+  path.pop();
+
+  // Search towards the root for a usable subtree.
+  if (path.height()) {
+    for (unsigned l = path.height() - 1; l; --l) {
+      if (!Traits::stopLess(path.node<Branch>(l).stop(path.offset(l)), x)) {
+        // The branch node at l+1 is usable.
+        path.offset(l + 1) =
+          path.node<Branch>(l + 1).safeFind(path.offset(l + 1), x);
+        return pathFillFind(x);
+      }
+      path.pop();
+    }
+    // Is the level-1 Branch usable?
+    if (!Traits::stopLess(map->rootBranch().stop(path.offset(0)), x)) {
+      path.offset(1) = path.node<Branch>(1).safeFind(path.offset(1), x);
+      return pathFillFind(x);
+    }
+  }
+
+  // We reached the root.
+  setRoot(map->rootBranch().findFrom(path.offset(0), map->rootSize, x));
+  if (valid())
+    pathFillFind(x);
+}
+
+//===----------------------------------------------------------------------===//
+//---                       IntervalMap::iterator                         ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class IntervalMap<KeyT, ValT, N, Traits>::iterator : public const_iterator {
+  friend class IntervalMap;
+
+  using IdxPair = IntervalMapImpl::IdxPair;
+
+  explicit iterator(IntervalMap &map) : const_iterator(map) {}
+
+  void setNodeStop(unsigned Level, KeyT Stop);
+  bool insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop);
+  template <typename NodeT> bool overflow(unsigned Level);
+  void treeInsert(KeyT a, KeyT b, ValT y);
+  void eraseNode(unsigned Level);
+  void treeErase(bool UpdateRoot = true);
+  bool canCoalesceLeft(KeyT Start, ValT x);
+  bool canCoalesceRight(KeyT Stop, ValT x);
+
+public:
+  /// iterator - Create null iterator.
+  iterator() = default;
+
+  /// setStart - Move the start of the current interval.
+  /// This may cause coalescing with the previous interval.
+  /// @param a New start key, must not overlap the previous interval.
+  void setStart(KeyT a);
+
+  /// setStop - Move the end of the current interval.
+  /// This may cause coalescing with the following interval.
+  /// @param b New stop key, must not overlap the following interval.
+  void setStop(KeyT b);
+
+  /// setValue - Change the mapped value of the current interval.
+  /// This may cause coalescing with the previous and following intervals.
+  /// @param x New value.
+  void setValue(ValT x);
+
+  /// setStartUnchecked - Move the start of the current interval without
+  /// checking for coalescing or overlaps.
+  /// This should only be used when it is known that coalescing is not required.
+  /// @param a New start key.
+  void setStartUnchecked(KeyT a) { this->unsafeStart() = a; }
+
+  /// setStopUnchecked - Move the end of the current interval without checking
+  /// for coalescing or overlaps.
+  /// This should only be used when it is known that coalescing is not required.
+  /// @param b New stop key.
+  void setStopUnchecked(KeyT b) {
+    this->unsafeStop() = b;
+    // Update keys in branch nodes as well.
+    if (this->path.atLastEntry(this->path.height()))
+      setNodeStop(this->path.height(), b);
+  }
+
+  /// setValueUnchecked - Change the mapped value of the current interval
+  /// without checking for coalescing.
+  /// @param x New value.
+  void setValueUnchecked(ValT x) { this->unsafeValue() = x; }
+
+  /// insert - Insert mapping [a;b] -> y before the current position.
+  void insert(KeyT a, KeyT b, ValT y);
+
+  /// erase - Erase the current interval.
+  void erase();
+
+  iterator &operator++() {
+    const_iterator::operator++();
+    return *this;
+  }
+
+  iterator operator++(int) {
+    iterator tmp = *this;
+    operator++();
+    return tmp;
+  }
+
+  iterator &operator--() {
+    const_iterator::operator--();
+    return *this;
+  }
+
+  iterator operator--(int) {
+    iterator tmp = *this;
+    operator--();
+    return tmp;
+  }
+};
+
+/// canCoalesceLeft - Can the current interval coalesce to the left after
+/// changing start or value?
+/// @param Start New start of current interval.
+/// @param Value New value for current interval.
+/// @return True when updating the current interval would enable coalescing.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::canCoalesceLeft(KeyT Start, ValT Value) {
+  using namespace IntervalMapImpl;
+  Path &P = this->path;
+  if (!this->branched()) {
+    unsigned i = P.leafOffset();
+    RootLeaf &Node = P.leaf<RootLeaf>();
+    return i && Node.value(i-1) == Value &&
+                Traits::adjacent(Node.stop(i-1), Start);
+  }
+  // Branched.
+  if (unsigned i = P.leafOffset()) {
+    Leaf &Node = P.leaf<Leaf>();
+    return Node.value(i-1) == Value && Traits::adjacent(Node.stop(i-1), Start);
+  } else if (NodeRef NR = P.getLeftSibling(P.height())) {
+    unsigned i = NR.size() - 1;
+    Leaf &Node = NR.get<Leaf>();
+    return Node.value(i) == Value && Traits::adjacent(Node.stop(i), Start);
+  }
+  return false;
+}
+
+/// canCoalesceRight - Can the current interval coalesce to the right after
+/// changing stop or value?
+/// @param Stop New stop of current interval.
+/// @param Value New value for current interval.
+/// @return True when updating the current interval would enable coalescing.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::canCoalesceRight(KeyT Stop, ValT Value) {
+  using namespace IntervalMapImpl;
+  Path &P = this->path;
+  unsigned i = P.leafOffset() + 1;
+  if (!this->branched()) {
+    if (i >= P.leafSize())
+      return false;
+    RootLeaf &Node = P.leaf<RootLeaf>();
+    return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
+  }
+  // Branched.
+  if (i < P.leafSize()) {
+    Leaf &Node = P.leaf<Leaf>();
+    return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
+  } else if (NodeRef NR = P.getRightSibling(P.height())) {
+    Leaf &Node = NR.get<Leaf>();
+    return Node.value(0) == Value && Traits::adjacent(Stop, Node.start(0));
+  }
+  return false;
+}
+
+/// setNodeStop - Update the stop key of the current node at level and above.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setNodeStop(unsigned Level, KeyT Stop) {
+  // There are no references to the root node, so nothing to update.
+  if (!Level)
+    return;
+  IntervalMapImpl::Path &P = this->path;
+  // Update nodes pointing to the current node.
+  while (--Level) {
+    P.node<Branch>(Level).stop(P.offset(Level)) = Stop;
+    if (!P.atLastEntry(Level))
+      return;
+  }
+  // Update root separately since it has a different layout.
+  P.node<RootBranch>(Level).stop(P.offset(Level)) = Stop;
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setStart(KeyT a) {
+  assert(Traits::nonEmpty(a, this->stop()) && "Cannot move start beyond stop");
+  KeyT &CurStart = this->unsafeStart();
+  if (!Traits::startLess(a, CurStart) || !canCoalesceLeft(a, this->value())) {
+    CurStart = a;
+    return;
+  }
+  // Coalesce with the interval to the left.
+  --*this;
+  a = this->start();
+  erase();
+  setStartUnchecked(a);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setStop(KeyT b) {
+  assert(Traits::nonEmpty(this->start(), b) && "Cannot move stop before start");
+  if (Traits::startLess(b, this->stop()) ||
+      !canCoalesceRight(b, this->value())) {
+    setStopUnchecked(b);
+    return;
+  }
+  // Coalesce with interval to the right.
+  KeyT a = this->start();
+  erase();
+  setStartUnchecked(a);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setValue(ValT x) {
+  setValueUnchecked(x);
+  if (canCoalesceRight(this->stop(), x)) {
+    KeyT a = this->start();
+    erase();
+    setStartUnchecked(a);
+  }
+  if (canCoalesceLeft(this->start(), x)) {
+    --*this;
+    KeyT a = this->start();
+    erase();
+    setStartUnchecked(a);
+  }
+}
+
+/// insertNode - insert a node before the current path at level.
+/// Leave the current path pointing at the new node.
+/// @param Level path index of the node to be inserted.
+/// @param Node The node to be inserted.
+/// @param Stop The last index in the new node.
+/// @return True if the tree height was increased.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop) {
+  assert(Level && "Cannot insert next to the root");
+  bool SplitRoot = false;
+  IntervalMap &IM = *this->map;
+  IntervalMapImpl::Path &P = this->path;
+
+  if (Level == 1) {
+    // Insert into the root branch node.
+    if (IM.rootSize < RootBranch::Capacity) {
+      IM.rootBranch().insert(P.offset(0), IM.rootSize, Node, Stop);
+      P.setSize(0, ++IM.rootSize);
+      P.reset(Level);
+      return SplitRoot;
+    }
+
+    // We need to split the root while keeping our position.
+    SplitRoot = true;
+    IdxPair Offset = IM.splitRoot(P.offset(0));
+    P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);
+
+    // Fall through to insert at the new higher level.
+    ++Level;
+  }
+
+  // When inserting before end(), make sure we have a valid path.
+  P.legalizeForInsert(--Level);
+
+  // Insert into the branch node at Level-1.
+  if (P.size(Level) == Branch::Capacity) {
+    // Branch node is full, handle the overflow.
+    assert(!SplitRoot && "Cannot overflow after splitting the root");
+    SplitRoot = overflow<Branch>(Level);
+    Level += SplitRoot;
+  }
+  P.node<Branch>(Level).insert(P.offset(Level), P.size(Level), Node, Stop);
+  P.setSize(Level, P.size(Level) + 1);
+  if (P.atLastEntry(Level))
+    setNodeStop(Level, Stop);
+  P.reset(Level + 1);
+  return SplitRoot;
+}
+
+// insert
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::insert(KeyT a, KeyT b, ValT y) {
+  if (this->branched())
+    return treeInsert(a, b, y);
+  IntervalMap &IM = *this->map;
+  IntervalMapImpl::Path &P = this->path;
+
+  // Try simple root leaf insert.
+  unsigned Size = IM.rootLeaf().insertFrom(P.leafOffset(), IM.rootSize, a, b, y);
+
+  // Was the root node insert successful?
+  if (Size <= RootLeaf::Capacity) {
+    P.setSize(0, IM.rootSize = Size);
+    return;
+  }
+
+  // Root leaf node is full, we must branch.
+  IdxPair Offset = IM.branchRoot(P.leafOffset());
+  P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);
+
+  // Now it fits in the new leaf.
+  treeInsert(a, b, y);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::treeInsert(KeyT a, KeyT b, ValT y) {
+  using namespace IntervalMapImpl;
+  Path &P = this->path;
+
+  if (!P.valid())
+    P.legalizeForInsert(this->map->height);
+
+  // Check if this insertion will extend the node to the left.
+  if (P.leafOffset() == 0 && Traits::startLess(a, P.leaf<Leaf>().start(0))) {
+    // Node is growing to the left, will it affect a left sibling node?
+    if (NodeRef Sib = P.getLeftSibling(P.height())) {
+      Leaf &SibLeaf = Sib.get<Leaf>();
+      unsigned SibOfs = Sib.size() - 1;
+      if (SibLeaf.value(SibOfs) == y &&
+          Traits::adjacent(SibLeaf.stop(SibOfs), a)) {
+        // This insertion will coalesce with the last entry in SibLeaf. We can
+        // handle it in two ways:
+        //  1. Extend SibLeaf.stop to b and be done, or
+        //  2. Extend a to SibLeaf, erase the SibLeaf entry and continue.
+        // We prefer 1, but need 2 when coalescing to the right as well.
+        Leaf &CurLeaf = P.leaf<Leaf>();
+        P.moveLeft(P.height());
+        if (Traits::stopLess(b, CurLeaf.start(0)) &&
+            (y != CurLeaf.value(0) || !Traits::adjacent(b, CurLeaf.start(0)))) {
+          // Easy, just extend SibLeaf and we're done.
+          setNodeStop(P.height(), SibLeaf.stop(SibOfs) = b);
+          return;
+        } else {
+          // We have both left and right coalescing. Erase the old SibLeaf entry
+          // and continue inserting the larger interval.
+          a = SibLeaf.start(SibOfs);
+          treeErase(/* UpdateRoot= */false);
+        }
+      }
+    } else {
+      // No left sibling means we are at begin(). Update cached bound.
+      this->map->rootBranchStart() = a;
+    }
+  }
+
+  // When we are inserting at the end of a leaf node, we must update stops.
+  unsigned Size = P.leafSize();
+  bool Grow = P.leafOffset() == Size;
+  Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), Size, a, b, y);
+
+  // Leaf insertion unsuccessful? Overflow and try again.
+  if (Size > Leaf::Capacity) {
+    overflow<Leaf>(P.height());
+    Grow = P.leafOffset() == P.leafSize();
+    Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), P.leafSize(), a, b, y);
+    assert(Size <= Leaf::Capacity && "overflow() didn't make room");
+  }
+
+  // Inserted, update offset and leaf size.
+  P.setSize(P.height(), Size);
+
+  // Insert was the last node entry, update stops.
+  if (Grow)
+    setNodeStop(P.height(), b);
+}
+
+/// erase - erase the current interval and move to the next position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::erase() {
+  IntervalMap &IM = *this->map;
+  IntervalMapImpl::Path &P = this->path;
+  assert(P.valid() && "Cannot erase end()");
+  if (this->branched())
+    return treeErase();
+  IM.rootLeaf().erase(P.leafOffset(), IM.rootSize);
+  P.setSize(0, --IM.rootSize);
+}
+
+/// treeErase - erase() for a branched tree.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::treeErase(bool UpdateRoot) {
+  IntervalMap &IM = *this->map;
+  IntervalMapImpl::Path &P = this->path;
+  Leaf &Node = P.leaf<Leaf>();
+
+  // Nodes are not allowed to become empty.
+  if (P.leafSize() == 1) {
+    IM.deleteNode(&Node);
+    eraseNode(IM.height);
+    // Update rootBranchStart if we erased begin().
+    if (UpdateRoot && IM.branched() && P.valid() && P.atBegin())
+      IM.rootBranchStart() = P.leaf<Leaf>().start(0);
+    return;
+  }
+
+  // Erase current entry.
+  Node.erase(P.leafOffset(), P.leafSize());
+  unsigned NewSize = P.leafSize() - 1;
+  P.setSize(IM.height, NewSize);
+  // When we erase the last entry, update stop and move to a legal position.
+  if (P.leafOffset() == NewSize) {
+    setNodeStop(IM.height, Node.stop(NewSize - 1));
+    P.moveRight(IM.height);
+  } else if (UpdateRoot && P.atBegin())
+    IM.rootBranchStart() = P.leaf<Leaf>().start(0);
+}
+
+/// eraseNode - Erase the current node at Level from its parent and move path to
+/// the first entry of the next sibling node.
+/// The node must be deallocated by the caller.
+/// @param Level 1..height, the root node cannot be erased.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::eraseNode(unsigned Level) {
+  assert(Level && "Cannot erase root node");
+  IntervalMap &IM = *this->map;
+  IntervalMapImpl::Path &P = this->path;
+
+  if (--Level == 0) {
+    IM.rootBranch().erase(P.offset(0), IM.rootSize);
+    P.setSize(0, --IM.rootSize);
+    // If this cleared the root, switch to height=0.
+    if (IM.empty()) {
+      IM.switchRootToLeaf();
+      this->setRoot(0);
+      return;
+    }
+  } else {
+    // Remove node ref from branch node at Level.
+    Branch &Parent = P.node<Branch>(Level);
+    if (P.size(Level) == 1) {
+      // Branch node became empty, remove it recursively.
+      IM.deleteNode(&Parent);
+      eraseNode(Level);
+    } else {
+      // Branch node won't become empty.
+      Parent.erase(P.offset(Level), P.size(Level));
+      unsigned NewSize = P.size(Level) - 1;
+      P.setSize(Level, NewSize);
+      // If we removed the last branch, update stop and move to a legal pos.
+      if (P.offset(Level) == NewSize) {
+        setNodeStop(Level, Parent.stop(NewSize - 1));
+        P.moveRight(Level);
+      }
+    }
+  }
+  // Update path cache for the new right sibling position.
+  if (P.valid()) {
+    P.reset(Level + 1);
+    P.offset(Level + 1) = 0;
+  }
+}
+
+/// overflow - Distribute entries of the current node evenly among
+/// its siblings and ensure that the current node is not full.
+/// This may require allocating a new node.
+/// @tparam NodeT The type of node at Level (Leaf or Branch).
+/// @param Level path index of the overflowing node.
+/// @return True when the tree height was changed.
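+/// For example, when a full node has two siblings that are each half full,
+/// distribute() can spread the elements across the three existing nodes, so
+/// no new node is allocated and the tree height is unchanged.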
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+template <typename NodeT>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::overflow(unsigned Level) {
+  using namespace IntervalMapImpl;
+  Path &P = this->path;
+  unsigned CurSize[4];
+  NodeT *Node[4];
+  unsigned Nodes = 0;
+  unsigned Elements = 0;
+  unsigned Offset = P.offset(Level);
+
+  // Do we have a left sibling?
+  NodeRef LeftSib = P.getLeftSibling(Level);
+  if (LeftSib) {
+    Offset += Elements = CurSize[Nodes] = LeftSib.size();
+    Node[Nodes++] = &LeftSib.get<NodeT>();
+  }
+
+  // Current node.
+  Elements += CurSize[Nodes] = P.size(Level);
+  Node[Nodes++] = &P.node<NodeT>(Level);
+
+  // Do we have a right sibling?
+  NodeRef RightSib = P.getRightSibling(Level);
+  if (RightSib) {
+    Elements += CurSize[Nodes] = RightSib.size();
+    Node[Nodes++] = &RightSib.get<NodeT>();
+  }
+
+  // Do we need to allocate a new node?
+  unsigned NewNode = 0;
+  if (Elements + 1 > Nodes * NodeT::Capacity) {
+    // Insert NewNode at the penultimate position, or after a single node.
+    NewNode = Nodes == 1 ? 1 : Nodes - 1;
+    CurSize[Nodes] = CurSize[NewNode];
+    Node[Nodes] = Node[NewNode];
+    CurSize[NewNode] = 0;
+    Node[NewNode] = this->map->template newNode<NodeT>();
+    ++Nodes;
+  }
+
+  // Compute the new element distribution.
+  unsigned NewSize[4];
+  IdxPair NewOffset = distribute(Nodes, Elements, NodeT::Capacity,
+                                 CurSize, NewSize, Offset, true);
+  adjustSiblingSizes(Node, Nodes, CurSize, NewSize);
+
+  // Move current location to the leftmost node.
+  if (LeftSib)
+    P.moveLeft(Level);
+
+  // Elements have been rearranged, now update node sizes and stops.
+  bool SplitRoot = false;
+  unsigned Pos = 0;
+  while (true) {
+    KeyT Stop = Node[Pos]->stop(NewSize[Pos]-1);
+    if (NewNode && Pos == NewNode) {
+      SplitRoot = insertNode(Level, NodeRef(Node[Pos], NewSize[Pos]), Stop);
+      Level += SplitRoot;
+    } else {
+      P.setSize(Level, NewSize[Pos]);
+      setNodeStop(Level, Stop);
+    }
+    if (Pos + 1 == Nodes)
+      break;
+    P.moveRight(Level);
+    ++Pos;
+  }
+
+  // Where was I? Find NewOffset.
+  while(Pos != NewOffset.first) {
+    P.moveLeft(Level);
+    --Pos;
+  }
+  P.offset(Level) = NewOffset.second;
+  return SplitRoot;
+}
+
+//===----------------------------------------------------------------------===//
+//---                       IntervalMapOverlaps                           ----//
+//===----------------------------------------------------------------------===//
+
+/// IntervalMapOverlaps - Iterate over the overlaps of mapped intervals in two
+/// IntervalMaps. The maps may be different, but the KeyT and Traits types
+/// should be the same.
+///
+/// Typical uses:
+///
+/// 1. Test for overlap:
+///    bool overlap = IntervalMapOverlaps(a, b).valid();
+///
+/// 2. Enumerate overlaps:
+///    for (IntervalMapOverlaps I(a, b); I.valid() ; ++I) { ... }
+///
+template <typename MapA, typename MapB>
+class IntervalMapOverlaps {
+  using KeyType = typename MapA::KeyType;
+  using Traits = typename MapA::KeyTraits;
+
+  typename MapA::const_iterator posA;
+  typename MapB::const_iterator posB;
+
+  /// advance - Move posA and posB forward until reaching an overlap, or until
+  /// either meets end.
+  /// Don't move the iterators if they are already overlapping.
+  void advance() {
+    if (!valid())
+      return;
+
+    if (Traits::stopLess(posA.stop(), posB.start())) {
+      // A ends before B begins. Catch up.
+      posA.advanceTo(posB.start());
+      if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
+        return;
+    } else if (Traits::stopLess(posB.stop(), posA.start())) {
+      // B ends before A begins. Catch up.
+      posB.advanceTo(posA.start());
+      if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
+        return;
+    } else
+      // Already overlapping.
+      return;
+
+    while (true) {
+      // Make a.end > b.start.
+      posA.advanceTo(posB.start());
+      if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
+        return;
+      // Make b.end > a.start.
+      posB.advanceTo(posA.start());
+      if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
+        return;
+    }
+  }
+
+public:
+  /// IntervalMapOverlaps - Create an iterator for the overlaps of a and b.
+  IntervalMapOverlaps(const MapA &a, const MapB &b)
+    : posA(b.empty() ? a.end() : a.find(b.start())),
+      posB(posA.valid() ? b.find(posA.start()) : b.end()) { advance(); }
+
+  /// valid - Return true if iterator is at an overlap.
+  bool valid() const {
+    return posA.valid() && posB.valid();
+  }
+
+  /// a - access the left hand side in the overlap.
+  const typename MapA::const_iterator &a() const { return posA; }
+
+  /// b - access the right hand side in the overlap.
+  const typename MapB::const_iterator &b() const { return posB; }
+
+  /// start - Beginning of the overlapping interval.
+  KeyType start() const {
+    KeyType ak = a().start();
+    KeyType bk = b().start();
+    return Traits::startLess(ak, bk) ? bk : ak;
+  }
+
+  /// stop - End of the overlapping interval.
+  KeyType stop() const {
+    KeyType ak = a().stop();
+    KeyType bk = b().stop();
+    return Traits::startLess(ak, bk) ? ak : bk;
+  }
+
+  /// skipA - Move to the next overlap that doesn't involve a().
+  void skipA() {
+    ++posA;
+    advance();
+  }
+
+  /// skipB - Move to the next overlap that doesn't involve b().
+  void skipB() {
+    ++posB;
+    advance();
+  }
+
+  /// Preincrement - Move to the next overlap.
+  IntervalMapOverlaps &operator++() {
+    // Bump the iterator that ends first. The other one may have more overlaps.
+    if (Traits::startLess(posB.stop(), posA.stop()))
+      skipB();
+    else
+      skipA();
+    return *this;
+  }
+
+  /// advanceTo - Move to the first overlapping interval with
+  /// stopLess(x, stop()).
+  void advanceTo(KeyType x) {
+    if (!valid())
+      return;
+    // Make sure advanceTo sees monotonic keys.
+    if (Traits::stopLess(posA.stop(), x))
+      posA.advanceTo(x);
+    if (Traits::stopLess(posB.stop(), x))
+      posB.advanceTo(x);
+    advance();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_INTERVALMAP_H
diff --git a/linux-x64/clang/include/llvm/ADT/IntrusiveRefCntPtr.h b/linux-x64/clang/include/llvm/ADT/IntrusiveRefCntPtr.h
new file mode 100644
index 0000000..430ef86
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/IntrusiveRefCntPtr.h
@@ -0,0 +1,270 @@
+//==- llvm/ADT/IntrusiveRefCntPtr.h - Smart Refcounting Pointer --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RefCountedBase, ThreadSafeRefCountedBase, and
+// IntrusiveRefCntPtr classes.
+//
+// IntrusiveRefCntPtr is a smart pointer to an object which maintains a
+// reference count.  (ThreadSafe)RefCountedBase is a mixin class that adds a
+// refcount member variable and methods for updating the refcount.  An object
+// that inherits from (ThreadSafe)RefCountedBase deletes itself when its
+// refcount hits zero.
+//
+// For example:
+//
+//   class MyClass : public RefCountedBase<MyClass> {};
+//
+//   void foo() {
+//     // Constructing an IntrusiveRefCntPtr increases the pointee's refcount by
+//     // 1 (from 0 in this case).
+//     IntrusiveRefCntPtr<MyClass> Ptr1(new MyClass());
+//
+//     // Copying an IntrusiveRefCntPtr increases the pointee's refcount by 1.
+//     IntrusiveRefCntPtr<MyClass> Ptr2(Ptr1);
+//
+//     // Move-constructing an IntrusiveRefCntPtr has no effect on the
+//     // pointee's refcount.  After a move, the moved-from pointer is null.
+//     IntrusiveRefCntPtr<MyClass> Ptr3(std::move(Ptr1));
+//     assert(Ptr1 == nullptr);
+//
+//     // Clearing an IntrusiveRefCntPtr decreases the pointee's refcount by 1.
+//     Ptr2.reset();
+//
+//     // The object deletes itself when we return from the function, because
+//     // Ptr3's destructor decrements its refcount to 0.
+//   }
+//
+// You can use IntrusiveRefCntPtr with isa<T>(), dyn_cast<T>(), etc.:
+//
+//   IntrusiveRefCntPtr<MyClass> Ptr(new MyClass());
+//   OtherClass *Other = dyn_cast<OtherClass>(Ptr);  // Ptr.get() not required
+//
+// IntrusiveRefCntPtr works with any class that
+//
+//  - inherits from (ThreadSafe)RefCountedBase,
+//  - has Retain() and Release() methods, or
+//  - specializes IntrusiveRefCntPtrInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTRUSIVEREFCNTPTR_H
+#define LLVM_ADT_INTRUSIVEREFCNTPTR_H
+
+#include <atomic>
+#include <cassert>
+#include <cstddef>
+
+namespace llvm {
+
+/// A CRTP mixin class that adds reference counting to a type.
+///
+/// The lifetime of an object which inherits from RefCountedBase is managed by
+/// calls to Release() and Retain(), which increment and decrement the object's
+/// refcount, respectively.  When a Release() call decrements the refcount to 0,
+/// the object deletes itself.
+template <class Derived> class RefCountedBase {
+  mutable unsigned RefCount = 0;
+
+public:
+  RefCountedBase() = default;
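+  // Copying does not copy the reference count; the new object starts at 0
+  // and is managed by whichever IntrusiveRefCntPtrs end up pointing at it.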
+  RefCountedBase(const RefCountedBase &) {}
+
+  void Retain() const { ++RefCount; }
+
+  void Release() const {
+    assert(RefCount > 0 && "Reference count is already zero.");
+    if (--RefCount == 0)
+      delete static_cast<const Derived *>(this);
+  }
+};
+
+/// A thread-safe version of \c RefCountedBase.
+template <class Derived> class ThreadSafeRefCountedBase {
+  mutable std::atomic<int> RefCount;
+
+protected:
+  ThreadSafeRefCountedBase() : RefCount(0) {}
+
+public:
+  void Retain() const { RefCount.fetch_add(1, std::memory_order_relaxed); }
+
+  void Release() const {
+    int NewRefCount = RefCount.fetch_sub(1, std::memory_order_acq_rel) - 1;
+    assert(NewRefCount >= 0 && "Reference count was already zero.");
+    if (NewRefCount == 0)
+      delete static_cast<const Derived *>(this);
+  }
+};
+
+/// Class you can specialize to provide custom retain/release functionality for
+/// a type.
+///
+/// Usually specializing this class is not necessary, as IntrusiveRefCntPtr
+/// works with any type which defines Retain() and Release() functions -- you
+/// can define those functions yourself if RefCountedBase doesn't work for you.
+///
+/// One case when you might want to specialize this type is if you have
+///  - Foo.h defines type Foo and includes Bar.h, and
+///  - Bar.h uses IntrusiveRefCntPtr<Foo> in inline functions.
+///
+/// Because Foo.h includes Bar.h, Bar.h can't include Foo.h in order to pull in
+/// the declaration of Foo.  Without the declaration of Foo, normally Bar.h
+/// wouldn't be able to use IntrusiveRefCntPtr<Foo>, which wants to call
+/// T::Retain and T::Release.
+///
+/// To resolve this, Bar.h could include a third header, FooFwd.h, which
+/// forward-declares Foo and specializes IntrusiveRefCntPtrInfo<Foo>.  Then
+/// Bar.h could use IntrusiveRefCntPtr<Foo>, although it still couldn't call any
+/// functions on Foo itself, because Foo would be an incomplete type.
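+///
+/// A hypothetical FooFwd.h could then look like this, with the out-of-line
+/// definitions living in a .cpp file where Foo is a complete type:
+///
+/// \code
+///   class Foo;
+///   template <> struct IntrusiveRefCntPtrInfo<Foo> {
+///     static void retain(Foo *obj);
+///     static void release(Foo *obj);
+///   };
+/// \endcode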
+template <typename T> struct IntrusiveRefCntPtrInfo {
+  static void retain(T *obj) { obj->Retain(); }
+  static void release(T *obj) { obj->Release(); }
+};
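+
+// A sketch of the FooFwd.h approach described above (illustrative only; Foo,
+// FooFwd.h, and Foo.cpp are hypothetical names, not part of LLVM):
+//
+//   // FooFwd.h
+//   class Foo;
+//   namespace llvm {
+//   template <> struct IntrusiveRefCntPtrInfo<Foo> {
+//     static void retain(Foo *obj);  // Defined in Foo.cpp, where Foo is
+//     static void release(Foo *obj); // a complete type.
+//   };
+//   } // end namespace llvm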
+
+/// A smart pointer to a reference-counted object that inherits from
+/// RefCountedBase or ThreadSafeRefCountedBase.
+///
+/// This class increments its pointee's reference count when it is created, and
+/// decrements its refcount when it's destroyed (or is changed to point to a
+/// different object).
+template <typename T> class IntrusiveRefCntPtr {
+  T *Obj = nullptr;
+
+public:
+  using element_type = T;
+
+  explicit IntrusiveRefCntPtr() = default;
+  IntrusiveRefCntPtr(T *obj) : Obj(obj) { retain(); }
+  IntrusiveRefCntPtr(const IntrusiveRefCntPtr &S) : Obj(S.Obj) { retain(); }
+  IntrusiveRefCntPtr(IntrusiveRefCntPtr &&S) : Obj(S.Obj) { S.Obj = nullptr; }
+
+  template <class X>
+  IntrusiveRefCntPtr(IntrusiveRefCntPtr<X> &&S) : Obj(S.get()) {
+    S.Obj = nullptr;
+  }
+
+  template <class X>
+  IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X> &S) : Obj(S.get()) {
+    retain();
+  }
+
+  ~IntrusiveRefCntPtr() { release(); }
+
+  IntrusiveRefCntPtr &operator=(IntrusiveRefCntPtr S) {
+    swap(S);
+    return *this;
+  }
+
+  T &operator*() const { return *Obj; }
+  T *operator->() const { return Obj; }
+  T *get() const { return Obj; }
+  explicit operator bool() const { return Obj; }
+
+  void swap(IntrusiveRefCntPtr &other) {
+    T *tmp = other.Obj;
+    other.Obj = Obj;
+    Obj = tmp;
+  }
+
+  void reset() {
+    release();
+    Obj = nullptr;
+  }
+
+  void resetWithoutRelease() { Obj = nullptr; }
+
+private:
+  void retain() {
+    if (Obj)
+      IntrusiveRefCntPtrInfo<T>::retain(Obj);
+  }
+
+  void release() {
+    if (Obj)
+      IntrusiveRefCntPtrInfo<T>::release(Obj);
+  }
+
+  template <typename X> friend class IntrusiveRefCntPtr;
+};
+
+template <class T, class U>
+inline bool operator==(const IntrusiveRefCntPtr<T> &A,
+                       const IntrusiveRefCntPtr<U> &B) {
+  return A.get() == B.get();
+}
+
+template <class T, class U>
+inline bool operator!=(const IntrusiveRefCntPtr<T> &A,
+                       const IntrusiveRefCntPtr<U> &B) {
+  return A.get() != B.get();
+}
+
+template <class T, class U>
+inline bool operator==(const IntrusiveRefCntPtr<T> &A, U *B) {
+  return A.get() == B;
+}
+
+template <class T, class U>
+inline bool operator!=(const IntrusiveRefCntPtr<T> &A, U *B) {
+  return A.get() != B;
+}
+
+template <class T, class U>
+inline bool operator==(T *A, const IntrusiveRefCntPtr<U> &B) {
+  return A == B.get();
+}
+
+template <class T, class U>
+inline bool operator!=(T *A, const IntrusiveRefCntPtr<U> &B) {
+  return A != B.get();
+}
+
+template <class T>
+bool operator==(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
+  return !B;
+}
+
+template <class T>
+bool operator==(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
+  return B == A;
+}
+
+template <class T>
+bool operator!=(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
+  return !(A == B);
+}
+
+template <class T>
+bool operator!=(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
+  return !(A == B);
+}
+
+// Make IntrusiveRefCntPtr work with dyn_cast, isa, and the other idioms from
+// Casting.h.
+template <typename From> struct simplify_type;
+
+template <class T> struct simplify_type<IntrusiveRefCntPtr<T>> {
+  using SimpleType = T *;
+
+  static SimpleType getSimplifiedValue(IntrusiveRefCntPtr<T> &Val) {
+    return Val.get();
+  }
+};
+
+template <class T> struct simplify_type<const IntrusiveRefCntPtr<T>> {
+  using SimpleType = /*const*/ T *;
+
+  static SimpleType getSimplifiedValue(const IntrusiveRefCntPtr<T> &Val) {
+    return Val.get();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_INTRUSIVEREFCNTPTR_H
diff --git a/linux-x64/clang/include/llvm/ADT/MapVector.h b/linux-x64/clang/include/llvm/ADT/MapVector.h
new file mode 100644
index 0000000..f69f8fd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/MapVector.h
@@ -0,0 +1,236 @@
+//===- llvm/ADT/MapVector.h - Map w/ deterministic value order --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a map that provides insertion-order iteration. The
+// interface is purposefully minimal. The key is assumed to be cheap to copy,
+// and two copies are kept: one for indexing in a DenseMap and one for
+// iteration in a std::vector.
+//
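+// A brief usage sketch (illustrative; the keys and values are arbitrary):
+//
+//   MapVector<int, const char *> MV;
+//   MV.insert(std::make_pair(3, "three"));
+//   MV.insert(std::make_pair(1, "one"));
+//   for (auto &KV : MV)
+//     printf("%d %s\n", KV.first, KV.second);
+//   // Prints "3 three" then "1 one": insertion order, not key order.
+//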
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_MAPVECTOR_H
+#define LLVM_ADT_MAPVECTOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+/// This class implements a map that also provides access to all stored values
+/// in a deterministic order. The values are kept in a std::vector and the
+/// mapping is done with DenseMap from Keys to indexes in that vector.
+template<typename KeyT, typename ValueT,
+         typename MapType = DenseMap<KeyT, unsigned>,
+         typename VectorType = std::vector<std::pair<KeyT, ValueT>>>
+class MapVector {
+  MapType Map;
+  VectorType Vector;
+
+public:
+  using value_type = typename VectorType::value_type;
+  using size_type = typename VectorType::size_type;
+
+  using iterator = typename VectorType::iterator;
+  using const_iterator = typename VectorType::const_iterator;
+  using reverse_iterator = typename VectorType::reverse_iterator;
+  using const_reverse_iterator = typename VectorType::const_reverse_iterator;
+
+  /// Clear the MapVector and return the underlying vector.
+  VectorType takeVector() {
+    Map.clear();
+    return std::move(Vector);
+  }
+
+  size_type size() const { return Vector.size(); }
+
+  /// Grow the MapVector so that it can contain at least \p NumEntries items
+  /// before resizing again.
+  void reserve(size_type NumEntries) {
+    Map.reserve(NumEntries);
+    Vector.reserve(NumEntries);
+  }
+
+  iterator begin() { return Vector.begin(); }
+  const_iterator begin() const { return Vector.begin(); }
+  iterator end() { return Vector.end(); }
+  const_iterator end() const { return Vector.end(); }
+
+  reverse_iterator rbegin() { return Vector.rbegin(); }
+  const_reverse_iterator rbegin() const { return Vector.rbegin(); }
+  reverse_iterator rend() { return Vector.rend(); }
+  const_reverse_iterator rend() const { return Vector.rend(); }
+
+  bool empty() const {
+    return Vector.empty();
+  }
+
+  std::pair<KeyT, ValueT>       &front()       { return Vector.front(); }
+  const std::pair<KeyT, ValueT> &front() const { return Vector.front(); }
+  std::pair<KeyT, ValueT>       &back()        { return Vector.back(); }
+  const std::pair<KeyT, ValueT> &back()  const { return Vector.back(); }
+
+  void clear() {
+    Map.clear();
+    Vector.clear();
+  }
+
+  void swap(MapVector &RHS) {
+    std::swap(Map, RHS.Map);
+    std::swap(Vector, RHS.Vector);
+  }
+
+  ValueT &operator[](const KeyT &Key) {
+    std::pair<KeyT, unsigned> Pair = std::make_pair(Key, 0);
+    std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
+    unsigned &I = Result.first->second;
+    if (Result.second) {
+      Vector.push_back(std::make_pair(Key, ValueT()));
+      I = Vector.size() - 1;
+    }
+    return Vector[I].second;
+  }
+
+  // Returns a copy of the value.  Only allowed if ValueT is copyable.
+  ValueT lookup(const KeyT &Key) const {
+    static_assert(std::is_copy_constructible<ValueT>::value,
+                  "Cannot call lookup() if ValueT is not copyable.");
+    typename MapType::const_iterator Pos = Map.find(Key);
+    return Pos == Map.end() ? ValueT() : Vector[Pos->second].second;
+  }
+
+  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
+    std::pair<KeyT, unsigned> Pair = std::make_pair(KV.first, 0);
+    std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
+    unsigned &I = Result.first->second;
+    if (Result.second) {
+      Vector.push_back(std::make_pair(KV.first, KV.second));
+      I = Vector.size() - 1;
+      return std::make_pair(std::prev(end()), true);
+    }
+    return std::make_pair(begin() + I, false);
+  }
+
+  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
+    // Copy KV.first into the map, then move it into the vector.
+    std::pair<KeyT, unsigned> Pair = std::make_pair(KV.first, 0);
+    std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
+    unsigned &I = Result.first->second;
+    if (Result.second) {
+      Vector.push_back(std::move(KV));
+      I = Vector.size() - 1;
+      return std::make_pair(std::prev(end()), true);
+    }
+    return std::make_pair(begin() + I, false);
+  }
+
+  size_type count(const KeyT &Key) const {
+    typename MapType::const_iterator Pos = Map.find(Key);
+    return Pos == Map.end() ? 0 : 1;
+  }
+
+  iterator find(const KeyT &Key) {
+    typename MapType::const_iterator Pos = Map.find(Key);
+    return Pos == Map.end() ? Vector.end() : (Vector.begin() + Pos->second);
+  }
+
+  const_iterator find(const KeyT &Key) const {
+    typename MapType::const_iterator Pos = Map.find(Key);
+    return Pos == Map.end() ? Vector.end() : (Vector.begin() + Pos->second);
+  }
+
+  /// \brief Remove the last element from the vector.
+  void pop_back() {
+    typename MapType::iterator Pos = Map.find(Vector.back().first);
+    Map.erase(Pos);
+    Vector.pop_back();
+  }
+
+  /// \brief Remove the element given by Iterator.
+  ///
+  /// Returns an iterator to the element following the one which was removed,
+  /// which may be end().
+  ///
+  /// \note This is a deceptively expensive operation (linear time).  It's
+  /// usually better to use \a remove_if() if possible.
+  typename VectorType::iterator erase(typename VectorType::iterator Iterator) {
+    Map.erase(Iterator->first);
+    auto Next = Vector.erase(Iterator);
+    if (Next == Vector.end())
+      return Next;
+
+    // Update indices in the map.
+    size_t Index = Next - Vector.begin();
+    for (auto &I : Map) {
+      assert(I.second != Index && "Index was already erased!");
+      if (I.second > Index)
+        --I.second;
+    }
+    return Next;
+  }
+
+  /// \brief Remove all elements with the key value Key.
+  ///
+  /// Returns the number of elements removed.
+  size_type erase(const KeyT &Key) {
+    auto Iterator = find(Key);
+    if (Iterator == end())
+      return 0;
+    erase(Iterator);
+    return 1;
+  }
+
+  /// \brief Remove the elements that match the predicate.
+  ///
+  /// Erase all elements that match \c Pred in a single pass.  Takes linear
+  /// time.
+  template <class Predicate> void remove_if(Predicate Pred);
+};
+
+template <typename KeyT, typename ValueT, typename MapType, typename VectorType>
+template <class Function>
+void MapVector<KeyT, ValueT, MapType, VectorType>::remove_if(Function Pred) {
+  auto O = Vector.begin();
+  for (auto I = O, E = Vector.end(); I != E; ++I) {
+    if (Pred(*I)) {
+      // Erase from the map.
+      Map.erase(I->first);
+      continue;
+    }
+
+    if (I != O) {
+      // Move the value and update the index in the map.
+      *O = std::move(*I);
+      Map[O->first] = O - Vector.begin();
+    }
+    ++O;
+  }
+  // Erase trailing entries in the vector.
+  Vector.erase(O, Vector.end());
+}
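+
+// For example (illustrative, continuing the sketch from the file comment
+// above), dropping every entry with an odd key takes a single pass:
+//
+//   MV.remove_if([](const std::pair<int, const char *> &KV) {
+//     return KV.first % 2 != 0;
+//   });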
+
+/// \brief A MapVector that performs no allocations if smaller than a certain
+/// size.
+template <typename KeyT, typename ValueT, unsigned N>
+struct SmallMapVector
+    : MapVector<KeyT, ValueT, SmallDenseMap<KeyT, unsigned, N>,
+                SmallVector<std::pair<KeyT, ValueT>, N>> {
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_MAPVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/None.h b/linux-x64/clang/include/llvm/ADT/None.h
new file mode 100644
index 0000000..c7a99c6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/None.h
@@ -0,0 +1,27 @@
+//===-- None.h - Simple null value for implicit construction ------*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file provides None, an enumerator for use in implicit constructors
+//  of various (usually templated) types to make such construction more
+//  terse.
+//
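+//  A brief sketch (illustrative; Optional lives in llvm/ADT/Optional.h):
+//
+//    Optional<int> V = None;  // an empty Optional, no specialization named
+//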
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_NONE_H
+#define LLVM_ADT_NONE_H
+
+namespace llvm {
+/// \brief A simple null object to allow implicit construction of Optional<T>
+/// and similar types without having to spell out the specialization's name.
+// (constant value 1 in an attempt to work around an MSVC build issue...)
+enum class NoneType { None = 1 };
+const NoneType None = NoneType::None;
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/Optional.h b/linux-x64/clang/include/llvm/ADT/Optional.h
new file mode 100644
index 0000000..353e5d0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Optional.h
@@ -0,0 +1,346 @@
+//===- Optional.h - Simple variant for passing optional values --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file provides Optional, a template class modeled in the spirit of
+//  OCaml's 'opt' variant.  The idea is to strongly type whether or not
+//  a value can be optional.
+//
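+//  A usage sketch (illustrative; maybeHalf and use are hypothetical):
+//
+//    Optional<int> maybeHalf(int N) {
+//      if (N % 2 != 0)
+//        return None;  // no value
+//      return N / 2;   // implicit construction from T
+//    }
+//
+//    if (Optional<int> Half = maybeHalf(42))
+//      use(*Half);
+//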
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_OPTIONAL_H
+#define LLVM_ADT_OPTIONAL_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <new>
+#include <utility>
+
+namespace llvm {
+
+namespace optional_detail {
+/// Storage for any type.
+template <typename T, bool IsPodLike> struct OptionalStorage {
+  AlignedCharArrayUnion<T> storage;
+  bool hasVal = false;
+
+  OptionalStorage() = default;
+
+  OptionalStorage(const T &y) : hasVal(true) { new (storage.buffer) T(y); }
+  OptionalStorage(const OptionalStorage &O) : hasVal(O.hasVal) {
+    if (hasVal)
+      new (storage.buffer) T(*O.getPointer());
+  }
+  OptionalStorage(T &&y) : hasVal(true) {
+    new (storage.buffer) T(std::forward<T>(y));
+  }
+  OptionalStorage(OptionalStorage &&O) : hasVal(O.hasVal) {
+    if (O.hasVal) {
+      new (storage.buffer) T(std::move(*O.getPointer()));
+    }
+  }
+
+  OptionalStorage &operator=(T &&y) {
+    if (hasVal)
+      *getPointer() = std::move(y);
+    else {
+      new (storage.buffer) T(std::move(y));
+      hasVal = true;
+    }
+    return *this;
+  }
+  OptionalStorage &operator=(OptionalStorage &&O) {
+    if (!O.hasVal)
+      reset();
+    else {
+      *this = std::move(*O.getPointer());
+    }
+    return *this;
+  }
+
+  // FIXME: these assignments (& the equivalent const T&/const Optional& ctors)
+  // could be made more efficient by passing by value, possibly unifying them
+  // with the rvalue versions above - but this could place a different set of
+  // requirements (notably: the existence of a default ctor) when implemented
+  // in that way. Careful SFINAE to avoid such pitfalls would be required.
+  OptionalStorage &operator=(const T &y) {
+    if (hasVal)
+      *getPointer() = y;
+    else {
+      new (storage.buffer) T(y);
+      hasVal = true;
+    }
+    return *this;
+  }
+  OptionalStorage &operator=(const OptionalStorage &O) {
+    if (!O.hasVal)
+      reset();
+    else
+      *this = *O.getPointer();
+    return *this;
+  }
+
+  ~OptionalStorage() { reset(); }
+
+  void reset() {
+    if (hasVal) {
+      (*getPointer()).~T();
+      hasVal = false;
+    }
+  }
+
+  T *getPointer() {
+    assert(hasVal);
+    return reinterpret_cast<T *>(storage.buffer);
+  }
+  const T *getPointer() const {
+    assert(hasVal);
+    return reinterpret_cast<const T *>(storage.buffer);
+  }
+};
+
+#if !defined(__GNUC__) || defined(__clang__) // GCC up to GCC7 miscompiles this.
+/// Storage for trivially copyable types only.
+template <typename T> struct OptionalStorage<T, true> {
+  AlignedCharArrayUnion<T> storage;
+  bool hasVal = false;
+
+  OptionalStorage() = default;
+
+  OptionalStorage(const T &y) : hasVal(true) { new (storage.buffer) T(y); }
+  OptionalStorage &operator=(const T &y) {
+    *reinterpret_cast<T *>(storage.buffer) = y;
+    hasVal = true;
+    return *this;
+  }
+
+  void reset() { hasVal = false; }
+};
+#endif
+} // namespace optional_detail
+
+template <typename T> class Optional {
+  optional_detail::OptionalStorage<T, isPodLike<T>::value> Storage;
+
+public:
+  using value_type = T;
+
+  constexpr Optional() {}
+  constexpr Optional(NoneType) {}
+
+  Optional(const T &y) : Storage(y) {}
+  Optional(const Optional &O) = default;
+
+  Optional(T &&y) : Storage(std::forward<T>(y)) {}
+  Optional(Optional &&O) = default;
+
+  Optional &operator=(T &&y) {
+    Storage = std::move(y);
+    return *this;
+  }
+  Optional &operator=(Optional &&O) = default;
+
+  /// Create a new object by constructing it in place with the given arguments.
+  template <typename... ArgTypes> void emplace(ArgTypes &&... Args) {
+    reset();
+    Storage.hasVal = true;
+    new (getPointer()) T(std::forward<ArgTypes>(Args)...);
+  }
+
+  static inline Optional create(const T *y) {
+    return y ? Optional(*y) : Optional();
+  }
+
+  Optional &operator=(const T &y) {
+    Storage = y;
+    return *this;
+  }
+  Optional &operator=(const Optional &O) = default;
+
+  void reset() { Storage.reset(); }
+
+  const T *getPointer() const {
+    assert(Storage.hasVal);
+    return reinterpret_cast<const T *>(Storage.storage.buffer);
+  }
+  T *getPointer() {
+    assert(Storage.hasVal);
+    return reinterpret_cast<T *>(Storage.storage.buffer);
+  }
+  const T &getValue() const LLVM_LVALUE_FUNCTION { return *getPointer(); }
+  T &getValue() LLVM_LVALUE_FUNCTION { return *getPointer(); }
+
+  explicit operator bool() const { return Storage.hasVal; }
+  bool hasValue() const { return Storage.hasVal; }
+  const T *operator->() const { return getPointer(); }
+  T *operator->() { return getPointer(); }
+  const T &operator*() const LLVM_LVALUE_FUNCTION { return *getPointer(); }
+  T &operator*() LLVM_LVALUE_FUNCTION { return *getPointer(); }
+
+  template <typename U>
+  constexpr T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
+    return hasValue() ? getValue() : std::forward<U>(value);
+  }
+
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+  T &&getValue() && { return std::move(*getPointer()); }
+  T &&operator*() && { return std::move(*getPointer()); }
+
+  template <typename U>
+  T getValueOr(U &&value) && {
+    return hasValue() ? std::move(getValue()) : std::forward<U>(value);
+  }
+#endif
+};
+
+template <typename T> struct isPodLike<Optional<T>> {
+  // An Optional<T> is pod-like if T is.
+  static const bool value = isPodLike<T>::value;
+};
+
+template <typename T, typename U>
+bool operator==(const Optional<T> &X, const Optional<U> &Y) {
+  if (X && Y)
+    return *X == *Y;
+  return X.hasValue() == Y.hasValue();
+}
+
+template <typename T, typename U>
+bool operator!=(const Optional<T> &X, const Optional<U> &Y) {
+  return !(X == Y);
+}
+
+template <typename T, typename U>
+bool operator<(const Optional<T> &X, const Optional<U> &Y) {
+  if (X && Y)
+    return *X < *Y;
+  return X.hasValue() < Y.hasValue();
+}
+
+template <typename T, typename U>
+bool operator<=(const Optional<T> &X, const Optional<U> &Y) {
+  return !(Y < X);
+}
+
+template <typename T, typename U>
+bool operator>(const Optional<T> &X, const Optional<U> &Y) {
+  return Y < X;
+}
+
+template <typename T, typename U>
+bool operator>=(const Optional<T> &X, const Optional<U> &Y) {
+  return !(X < Y);
+}
+
+template<typename T>
+bool operator==(const Optional<T> &X, NoneType) {
+  return !X;
+}
+
+template<typename T>
+bool operator==(NoneType, const Optional<T> &X) {
+  return X == None;
+}
+
+template<typename T>
+bool operator!=(const Optional<T> &X, NoneType) {
+  return !(X == None);
+}
+
+template<typename T>
+bool operator!=(NoneType, const Optional<T> &X) {
+  return X != None;
+}
+
+template <typename T> bool operator<(const Optional<T> &X, NoneType) {
+  return false;
+}
+
+template <typename T> bool operator<(NoneType, const Optional<T> &X) {
+  return X.hasValue();
+}
+
+template <typename T> bool operator<=(const Optional<T> &X, NoneType) {
+  return !(None < X);
+}
+
+template <typename T> bool operator<=(NoneType, const Optional<T> &X) {
+  return !(X < None);
+}
+
+template <typename T> bool operator>(const Optional<T> &X, NoneType) {
+  return None < X;
+}
+
+template <typename T> bool operator>(NoneType, const Optional<T> &X) {
+  return X < None;
+}
+
+template <typename T> bool operator>=(const Optional<T> &X, NoneType) {
+  return None <= X;
+}
+
+template <typename T> bool operator>=(NoneType, const Optional<T> &X) {
+  return X <= None;
+}
+
+template <typename T> bool operator==(const Optional<T> &X, const T &Y) {
+  return X && *X == Y;
+}
+
+template <typename T> bool operator==(const T &X, const Optional<T> &Y) {
+  return Y && X == *Y;
+}
+
+template <typename T> bool operator!=(const Optional<T> &X, const T &Y) {
+  return !(X == Y);
+}
+
+template <typename T> bool operator!=(const T &X, const Optional<T> &Y) {
+  return !(X == Y);
+}
+
+template <typename T> bool operator<(const Optional<T> &X, const T &Y) {
+  return !X || *X < Y;
+}
+
+template <typename T> bool operator<(const T &X, const Optional<T> &Y) {
+  return Y && X < *Y;
+}
+
+template <typename T> bool operator<=(const Optional<T> &X, const T &Y) {
+  return !(Y < X);
+}
+
+template <typename T> bool operator<=(const T &X, const Optional<T> &Y) {
+  return !(Y < X);
+}
+
+template <typename T> bool operator>(const Optional<T> &X, const T &Y) {
+  return Y < X;
+}
+
+template <typename T> bool operator>(const T &X, const Optional<T> &Y) {
+  return Y < X;
+}
+
+template <typename T> bool operator>=(const Optional<T> &X, const T &Y) {
+  return !(X < Y);
+}
+
+template <typename T> bool operator>=(const T &X, const Optional<T> &Y) {
+  return !(X < Y);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_OPTIONAL_H
diff --git a/linux-x64/clang/include/llvm/ADT/PackedVector.h b/linux-x64/clang/include/llvm/ADT/PackedVector.h
new file mode 100644
index 0000000..95adc29
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PackedVector.h
@@ -0,0 +1,151 @@
+//===- llvm/ADT/PackedVector.h - Packed values vector -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PackedVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_PACKEDVECTOR_H
+#define LLVM_ADT_PACKEDVECTOR_H
+
+#include "llvm/ADT/BitVector.h"
+#include <cassert>
+#include <limits>
+
+namespace llvm {
+
+template <typename T, unsigned BitNum, typename BitVectorTy, bool isSigned>
+class PackedVectorBase;
+
+// This won't be necessary if we can specialize members without specializing
+// the parent template.
+template <typename T, unsigned BitNum, typename BitVectorTy>
+class PackedVectorBase<T, BitNum, BitVectorTy, false> {
+protected:
+  static T getValue(const BitVectorTy &Bits, unsigned Idx) {
+    T val = T();
+    for (unsigned i = 0; i != BitNum; ++i)
+      val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
+    return val;
+  }
+
+  static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
+    assert((val >> BitNum) == 0 && "value is too big");
+    for (unsigned i = 0; i != BitNum; ++i)
+      Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
+  }
+};
+
+template <typename T, unsigned BitNum, typename BitVectorTy>
+class PackedVectorBase<T, BitNum, BitVectorTy, true> {
+protected:
+  static T getValue(const BitVectorTy &Bits, unsigned Idx) {
+    T val = T();
+    for (unsigned i = 0; i != BitNum-1; ++i)
+      val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
+    if (Bits[(Idx << (BitNum-1)) + BitNum-1])
+      val = ~val;
+    return val;
+  }
+
+  static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
+    if (val < 0) {
+      val = ~val;
+      Bits.set((Idx << (BitNum-1)) + BitNum-1);
+    }
+    assert((val >> (BitNum-1)) == 0 && "value is too big");
+    for (unsigned i = 0; i != BitNum-1; ++i)
+      Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
+  }
+};
+
+/// \brief Store a vector of values using a specific number of bits for each
+/// value. Both signed and unsigned types can be used, e.g.
+/// @code
+///   PackedVector<signed, 2> vec;
+/// @endcode
+/// will create a vector accepting values -2, -1, 0, 1. Any other value will hit
+/// an assertion.
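+///
+/// A brief usage sketch (illustrative):
+/// @code
+///   PackedVector<signed, 2> vec;
+///   vec.push_back(-2);
+///   vec.push_back(1);
+///   signed first = vec[0]; // reads back -2
+/// @endcode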
+template <typename T, unsigned BitNum, typename BitVectorTy = BitVector>
+class PackedVector : public PackedVectorBase<T, BitNum, BitVectorTy,
+                                            std::numeric_limits<T>::is_signed> {
+  BitVectorTy Bits;
+  using base = PackedVectorBase<T, BitNum, BitVectorTy,
+                                std::numeric_limits<T>::is_signed>;
+
+public:
+  class reference {
+    PackedVector &Vec;
+    const unsigned Idx;
+
+  public:
+    reference() = delete;
+    reference(PackedVector &vec, unsigned idx) : Vec(vec), Idx(idx) {}
+
+    reference &operator=(T val) {
+      Vec.setValue(Vec.Bits, Idx, val);
+      return *this;
+    }
+
+    operator T() const {
+      return Vec.getValue(Vec.Bits, Idx);
+    }
+  };
+
+  PackedVector() = default;
+  explicit PackedVector(unsigned size) : Bits(size << (BitNum-1)) {}
+
+  bool empty() const { return Bits.empty(); }
+
+  unsigned size() const { return Bits.size() >> (BitNum - 1); }
+
+  void clear() { Bits.clear(); }
+
+  void resize(unsigned N) { Bits.resize(N << (BitNum - 1)); }
+
+  void reserve(unsigned N) { Bits.reserve(N << (BitNum-1)); }
+
+  PackedVector &reset() {
+    Bits.reset();
+    return *this;
+  }
+
+  void push_back(T val) {
+    resize(size()+1);
+    (*this)[size()-1] = val;
+  }
+
+  reference operator[](unsigned Idx) {
+    return reference(*this, Idx);
+  }
+
+  T operator[](unsigned Idx) const {
+    return base::getValue(Bits, Idx);
+  }
+
+  bool operator==(const PackedVector &RHS) const {
+    return Bits == RHS.Bits;
+  }
+
+  bool operator!=(const PackedVector &RHS) const {
+    return Bits != RHS.Bits;
+  }
+
+  PackedVector &operator|=(const PackedVector &RHS) {
+    Bits |= RHS.Bits;
+    return *this;
+  }
+};
+
+// Leave BitNum=0 undefined.
+template <typename T> class PackedVector<T, 0>;
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_PACKEDVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/PointerEmbeddedInt.h b/linux-x64/clang/include/llvm/ADT/PointerEmbeddedInt.h
new file mode 100644
index 0000000..ab4e104
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PointerEmbeddedInt.h
@@ -0,0 +1,120 @@
+//===- llvm/ADT/PointerEmbeddedInt.h ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTEREMBEDDEDINT_H
+#define LLVM_ADT_POINTEREMBEDDEDINT_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <climits>
+#include <cstdint>
+#include <type_traits>
+
+namespace llvm {
+
+/// Utility to embed an integer into a pointer-like type. This is specifically
+/// intended to allow embedding integers where fewer bits are required than
+/// exist in a pointer, and the integer can participate in abstractions
+/// alongside other pointer-like types. For example, it can be placed into a \c
+/// PointerSumType or \c PointerUnion.
+///
+/// Note that much like pointers, an integer value of zero has special utility
+/// due to boolean conversions. For example, a non-null value can be tested for
+/// in the above abstractions without testing the particular active member.
+/// Also, the default constructed value zero initializes the integer.
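+///
+/// A usage sketch (illustrative):
+/// \code
+///   PointerEmbeddedInt<int, 3> Small = 3; // kept shifted into the high bits,
+///                                         // so the low bits stay zero
+///   int I = Small;                        // implicit conversion back to int
+/// \endcode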
+template <typename IntT, int Bits = sizeof(IntT) * CHAR_BIT>
+class PointerEmbeddedInt {
+  uintptr_t Value = 0;
+
+  // Note: This '<' is correct; using '<=' would result in some shifts
+  // overflowing their storage types.
+  static_assert(Bits < sizeof(uintptr_t) * CHAR_BIT,
+                "Cannot embed more bits than we have in a pointer!");
+
+  enum : uintptr_t {
+    // We shift as many zeros into the value as we can while preserving the
+    // number of bits desired for the integer.
+    Shift = sizeof(uintptr_t) * CHAR_BIT - Bits,
+
+    // We also want to be able to mask out the preserved bits for asserts.
+    Mask = static_cast<uintptr_t>(-1) << Bits
+  };
+
+  struct RawValueTag {
+    explicit RawValueTag() = default;
+  };
+
+  friend struct PointerLikeTypeTraits<PointerEmbeddedInt>;
+
+  explicit PointerEmbeddedInt(uintptr_t Value, RawValueTag) : Value(Value) {}
+
+public:
+  PointerEmbeddedInt() = default;
+
+  PointerEmbeddedInt(IntT I) { *this = I; }
+
+  PointerEmbeddedInt &operator=(IntT I) {
+    assert((std::is_signed<IntT>::value ? isInt<Bits>(I) : isUInt<Bits>(I)) &&
+           "Integer has bits outside those preserved!");
+    Value = static_cast<uintptr_t>(I) << Shift;
+    return *this;
+  }
+
+  // Note that this implicit conversion additionally allows all of the basic
+  // comparison operators to work transparently, etc.
+  operator IntT() const {
+    if (std::is_signed<IntT>::value)
+      return static_cast<IntT>(static_cast<intptr_t>(Value) >> Shift);
+    return static_cast<IntT>(Value >> Shift);
+  }
+};
+
+// Provide pointer like traits to support use with pointer unions and sum
+// types.
+template <typename IntT, int Bits>
+struct PointerLikeTypeTraits<PointerEmbeddedInt<IntT, Bits>> {
+  using T = PointerEmbeddedInt<IntT, Bits>;
+
+  static inline void *getAsVoidPointer(const T &P) {
+    return reinterpret_cast<void *>(P.Value);
+  }
+
+  static inline T getFromVoidPointer(void *P) {
+    return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
+  }
+
+  static inline T getFromVoidPointer(const void *P) {
+    return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
+  }
+
+  enum { NumLowBitsAvailable = T::Shift };
+};
+
+// Teach DenseMap how to use PointerEmbeddedInt objects as keys if the Int type
+// itself can be a key.
+template <typename IntT, int Bits>
+struct DenseMapInfo<PointerEmbeddedInt<IntT, Bits>> {
+  using T = PointerEmbeddedInt<IntT, Bits>;
+  using IntInfo = DenseMapInfo<IntT>;
+
+  static inline T getEmptyKey() { return IntInfo::getEmptyKey(); }
+  static inline T getTombstoneKey() { return IntInfo::getTombstoneKey(); }
+
+  static unsigned getHashValue(const T &Arg) {
+    return IntInfo::getHashValue(Arg);
+  }
+
+  static bool isEqual(const T &LHS, const T &RHS) { return LHS == RHS; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POINTEREMBEDDEDINT_H
diff --git a/linux-x64/clang/include/llvm/ADT/PointerIntPair.h b/linux-x64/clang/include/llvm/ADT/PointerIntPair.h
new file mode 100644
index 0000000..884d051
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PointerIntPair.h
@@ -0,0 +1,233 @@
+//===- llvm/ADT/PointerIntPair.h - Pair for pointer and int -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PointerIntPair class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERINTPAIR_H
+#define LLVM_ADT_POINTERINTPAIR_H
+
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <cstdint>
+#include <limits>
+
+namespace llvm {
+
+template <typename T> struct DenseMapInfo;
+template <typename PointerT, unsigned IntBits, typename PtrTraits>
+struct PointerIntPairInfo;
+
+/// PointerIntPair - This class implements a pair of a pointer and a small
+/// integer.  It is designed to represent this in the space required by one
+/// pointer by bitmangling the integer into the low part of the pointer.  This
+/// can only be done for small integers: typically up to 3 bits, but it depends
+/// on the number of bits available according to PointerLikeTypeTraits for the
+/// type.
+///
+/// Note that PointerIntPair always puts the IntVal part in the highest bits
+/// possible.  For example, PointerIntPair<void*, 1, bool> will put the bit for
+/// the bool into bit #2, not bit #0, which allows the low two bits to be used
+/// for something else.  For example, this allows:
+///   PointerIntPair<PointerIntPair<void*, 1, bool>, 1, bool>
+/// ... and the two bools will land in different bits.
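+///
+/// A usage sketch (illustrative; P is assumed to be suitably aligned):
+/// \code
+///   int *P = new int(42);
+///   PointerIntPair<int *, 1, bool> Pair(P, true);
+///   int *Q = Pair.getPointer(); // == P
+///   bool B = Pair.getInt();     // == true
+/// \endcode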
+template <typename PointerTy, unsigned IntBits, typename IntType = unsigned,
+          typename PtrTraits = PointerLikeTypeTraits<PointerTy>,
+          typename Info = PointerIntPairInfo<PointerTy, IntBits, PtrTraits>>
+class PointerIntPair {
+  intptr_t Value = 0;
+
+public:
+  constexpr PointerIntPair() = default;
+
+  PointerIntPair(PointerTy PtrVal, IntType IntVal) {
+    setPointerAndInt(PtrVal, IntVal);
+  }
+
+  explicit PointerIntPair(PointerTy PtrVal) { initWithPointer(PtrVal); }
+
+  PointerTy getPointer() const { return Info::getPointer(Value); }
+
+  IntType getInt() const { return (IntType)Info::getInt(Value); }
+
+  void setPointer(PointerTy PtrVal) {
+    Value = Info::updatePointer(Value, PtrVal);
+  }
+
+  void setInt(IntType IntVal) {
+    Value = Info::updateInt(Value, static_cast<intptr_t>(IntVal));
+  }
+
+  void initWithPointer(PointerTy PtrVal) {
+    Value = Info::updatePointer(0, PtrVal);
+  }
+
+  void setPointerAndInt(PointerTy PtrVal, IntType IntVal) {
+    Value = Info::updateInt(Info::updatePointer(0, PtrVal),
+                            static_cast<intptr_t>(IntVal));
+  }
+
+  PointerTy const *getAddrOfPointer() const {
+    return const_cast<PointerIntPair *>(this)->getAddrOfPointer();
+  }
+
+  PointerTy *getAddrOfPointer() {
+    assert(Value == reinterpret_cast<intptr_t>(getPointer()) &&
+           "Can only return the address if IntBits is cleared and "
+           "PtrTraits doesn't change the pointer");
+    return reinterpret_cast<PointerTy *>(&Value);
+  }
+
+  void *getOpaqueValue() const { return reinterpret_cast<void *>(Value); }
+
+  void setFromOpaqueValue(void *Val) {
+    Value = reinterpret_cast<intptr_t>(Val);
+  }
+
+  static PointerIntPair getFromOpaqueValue(void *V) {
+    PointerIntPair P;
+    P.setFromOpaqueValue(V);
+    return P;
+  }
+
+  // Allow PointerIntPairs to be created from const void * if and only if the
+  // pointer type could be created from a const void *.
+  static PointerIntPair getFromOpaqueValue(const void *V) {
+    (void)PtrTraits::getFromVoidPointer(V);
+    return getFromOpaqueValue(const_cast<void *>(V));
+  }
+
+  bool operator==(const PointerIntPair &RHS) const {
+    return Value == RHS.Value;
+  }
+
+  bool operator!=(const PointerIntPair &RHS) const {
+    return Value != RHS.Value;
+  }
+
+  bool operator<(const PointerIntPair &RHS) const { return Value < RHS.Value; }
+  bool operator>(const PointerIntPair &RHS) const { return Value > RHS.Value; }
+
+  bool operator<=(const PointerIntPair &RHS) const {
+    return Value <= RHS.Value;
+  }
+
+  bool operator>=(const PointerIntPair &RHS) const {
+    return Value >= RHS.Value;
+  }
+};
+
+template <typename PointerT, unsigned IntBits, typename PtrTraits>
+struct PointerIntPairInfo {
+  static_assert(PtrTraits::NumLowBitsAvailable <
+                    std::numeric_limits<uintptr_t>::digits,
+                "cannot use a pointer type that has all bits free");
+  static_assert(IntBits <= PtrTraits::NumLowBitsAvailable,
+                "PointerIntPair with integer size too large for pointer");
+  enum : uintptr_t {
+    /// PointerBitMask - The bits that come from the pointer.
+    PointerBitMask =
+        ~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable) - 1),
+
+    /// IntShift - The number of low bits that we reserve for other uses, and
+    /// keep zero.
+    IntShift = (uintptr_t)PtrTraits::NumLowBitsAvailable - IntBits,
+
+    /// IntMask - This is the unshifted mask for valid bits of the int type.
+    IntMask = (uintptr_t)(((intptr_t)1 << IntBits) - 1),
+
+    // ShiftedIntMask - These are the bits for the integer, shifted into place.
+    ShiftedIntMask = (uintptr_t)(IntMask << IntShift)
+  };
+
+  static PointerT getPointer(intptr_t Value) {
+    return PtrTraits::getFromVoidPointer(
+        reinterpret_cast<void *>(Value & PointerBitMask));
+  }
+
+  static intptr_t getInt(intptr_t Value) {
+    return (Value >> IntShift) & IntMask;
+  }
+
+  static intptr_t updatePointer(intptr_t OrigValue, PointerT Ptr) {
+    intptr_t PtrWord =
+        reinterpret_cast<intptr_t>(PtrTraits::getAsVoidPointer(Ptr));
+    assert((PtrWord & ~PointerBitMask) == 0 &&
+           "Pointer is not sufficiently aligned");
+    // Preserve all low bits, just update the pointer.
+    return PtrWord | (OrigValue & ~PointerBitMask);
+  }
+
+  static intptr_t updateInt(intptr_t OrigValue, intptr_t Int) {
+    intptr_t IntWord = static_cast<intptr_t>(Int);
+    assert((IntWord & ~IntMask) == 0 && "Integer too large for field");
+
+    // Preserve all bits other than the ones we are updating.
+    return (OrigValue & ~ShiftedIntMask) | IntWord << IntShift;
+  }
+};
+
+template <typename T> struct isPodLike;
+template <typename PointerTy, unsigned IntBits, typename IntType>
+struct isPodLike<PointerIntPair<PointerTy, IntBits, IntType>> {
+  static const bool value = true;
+};
+
+// Provide specialization of DenseMapInfo for PointerIntPair.
+template <typename PointerTy, unsigned IntBits, typename IntType>
+struct DenseMapInfo<PointerIntPair<PointerTy, IntBits, IntType>> {
+  using Ty = PointerIntPair<PointerTy, IntBits, IntType>;
+
+  static Ty getEmptyKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-1);
+    Val <<= PointerLikeTypeTraits<Ty>::NumLowBitsAvailable;
+    return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
+  }
+
+  static Ty getTombstoneKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-2);
+    Val <<= PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable;
+    return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
+  }
+
+  static unsigned getHashValue(Ty V) {
+    uintptr_t IV = reinterpret_cast<uintptr_t>(V.getOpaqueValue());
+    return unsigned(IV) ^ unsigned(IV >> 9);
+  }
+
+  static bool isEqual(const Ty &LHS, const Ty &RHS) { return LHS == RHS; }
+};
+
+// Teach SmallPtrSet that PointerIntPair is "basically a pointer".
+template <typename PointerTy, unsigned IntBits, typename IntType,
+          typename PtrTraits>
+struct PointerLikeTypeTraits<
+    PointerIntPair<PointerTy, IntBits, IntType, PtrTraits>> {
+  static inline void *
+  getAsVoidPointer(const PointerIntPair<PointerTy, IntBits, IntType> &P) {
+    return P.getOpaqueValue();
+  }
+
+  static inline PointerIntPair<PointerTy, IntBits, IntType>
+  getFromVoidPointer(void *P) {
+    return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
+  }
+
+  static inline PointerIntPair<PointerTy, IntBits, IntType>
+  getFromVoidPointer(const void *P) {
+    return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
+  }
+
+  enum { NumLowBitsAvailable = PtrTraits::NumLowBitsAvailable - IntBits };
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POINTERINTPAIR_H
diff --git a/linux-x64/clang/include/llvm/ADT/PointerSumType.h b/linux-x64/clang/include/llvm/ADT/PointerSumType.h
new file mode 100644
index 0000000..e379571
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PointerSumType.h
@@ -0,0 +1,207 @@
+//===- llvm/ADT/PointerSumType.h --------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERSUMTYPE_H
+#define LLVM_ADT_POINTERSUMTYPE_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <cstdint>
+#include <type_traits>
+
+namespace llvm {
+
+/// A compile time pair of an integer tag and the pointer-like type which it
+/// indexes within a sum type. Also allows the user to specify a particular
+/// traits class for pointer types with custom behavior such as over-aligned
+/// allocation.
+template <uintptr_t N, typename PointerArgT,
+          typename TraitsArgT = PointerLikeTypeTraits<PointerArgT>>
+struct PointerSumTypeMember {
+  enum { Tag = N };
+  using PointerT = PointerArgT;
+  using TraitsT = TraitsArgT;
+};
+
+namespace detail {
+
+template <typename TagT, typename... MemberTs> struct PointerSumTypeHelper;
+
+} // end namespace detail
+
+/// A sum type over pointer-like types.
+///
+/// This is a normal tagged union across pointer-like types that uses the low
+/// bits of the pointers to store the tag.
+///
+/// Each member of the sum type is specified by passing a \c
+/// PointerSumTypeMember specialization in the variadic member argument list.
+/// This allows the user to control the particular tag value associated with
+/// a particular type, use the same type for multiple different tags, and
+/// customize the pointer-like traits used for a particular member. Note that
+/// these *must* be specializations of \c PointerSumTypeMember, no other type
+/// will suffice, even if it provides a compatible interface.
+///
+/// This type implements all of the comparison operators and even hash table
+/// support by comparing the underlying storage of the pointer values. It
+/// doesn't support delegating to particular members for comparisons.
+///
+/// It also default constructs to a zero tag with a null pointer, whatever that
+/// would be. This means the zero value of the tag type is significant and is
+/// best reserved for the state you want a default-constructed sum type to
+/// represent.
+///
+/// There is no support for constructing or accessing with a dynamic tag as
+/// that would fundamentally violate the type safety provided by the sum type.
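+///
+/// A usage sketch (illustrative; X, Y, XPtr, and use are hypothetical):
+/// \code
+///   enum Kinds { K_X, K_Y };
+///   using Sum = PointerSumType<Kinds, PointerSumTypeMember<K_X, X *>,
+///                              PointerSumTypeMember<K_Y, Y *>>;
+///   Sum S = Sum::create<K_X>(XPtr);
+///   if (S.is<K_X>())
+///     use(S.get<K_X>()); // typed access checked against the tag
+/// \endcode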
+template <typename TagT, typename... MemberTs> class PointerSumType {
+  uintptr_t Value = 0;
+
+  using HelperT = detail::PointerSumTypeHelper<TagT, MemberTs...>;
+
+public:
+  constexpr PointerSumType() = default;
+
+  /// A typed constructor for a specific tagged member of the sum type.
+  template <TagT N>
+  static PointerSumType
+  create(typename HelperT::template Lookup<N>::PointerT Pointer) {
+    PointerSumType Result;
+    void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
+    assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
+           "Pointer is insufficiently aligned to store the discriminant!");
+    Result.Value = reinterpret_cast<uintptr_t>(V) | N;
+    return Result;
+  }
+
+  TagT getTag() const { return static_cast<TagT>(Value & HelperT::TagMask); }
+
+  template <TagT N> bool is() const { return N == getTag(); }
+
+  template <TagT N> typename HelperT::template Lookup<N>::PointerT get() const {
+    void *P = is<N>() ? getImpl() : nullptr;
+    return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(P);
+  }
+
+  template <TagT N>
+  typename HelperT::template Lookup<N>::PointerT cast() const {
+    assert(is<N>() && "This instance has a different active member.");
+    return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(getImpl());
+  }
+
+  explicit operator bool() const { return Value & HelperT::PointerMask; }
+  bool operator==(const PointerSumType &R) const { return Value == R.Value; }
+  bool operator!=(const PointerSumType &R) const { return Value != R.Value; }
+  bool operator<(const PointerSumType &R) const { return Value < R.Value; }
+  bool operator>(const PointerSumType &R) const { return Value > R.Value; }
+  bool operator<=(const PointerSumType &R) const { return Value <= R.Value; }
+  bool operator>=(const PointerSumType &R) const { return Value >= R.Value; }
+
+  uintptr_t getOpaqueValue() const { return Value; }
+
+protected:
+  void *getImpl() const {
+    return reinterpret_cast<void *>(Value & HelperT::PointerMask);
+  }
+};
+
+namespace detail {
+
+/// A helper template for implementing \c PointerSumType. It provides fast
+/// compile-time lookup of the member from a particular tag value, along with
+/// useful constants and compile-time checking infrastructure.
+template <typename TagT, typename... MemberTs>
+struct PointerSumTypeHelper : MemberTs... {
+  // First we use a trick to allow quickly looking up information about
+  // a particular member of the sum type. This works because we arranged to
+  // have this type derive from all of the member type templates. We can select
+  // the matching member for a tag using type deduction during overload
+  // resolution.
+  template <TagT N, typename PointerT, typename TraitsT>
+  static PointerSumTypeMember<N, PointerT, TraitsT>
+  LookupOverload(PointerSumTypeMember<N, PointerT, TraitsT> *);
+  template <TagT N> static void LookupOverload(...);
+  template <TagT N> struct Lookup {
+    // Compute a particular member type by resolving the lookup helper overload.
+    using MemberT = decltype(
+        LookupOverload<N>(static_cast<PointerSumTypeHelper *>(nullptr)));
+
+    /// The Nth member's pointer type.
+    using PointerT = typename MemberT::PointerT;
+
+    /// The Nth member's traits type.
+    using TraitsT = typename MemberT::TraitsT;
+  };
+
+  // Next we need to compute the number of bits available for the discriminant
+  // by taking the min of the bits available for each member. Much of this
+  // would be dramatically easier with good constexpr support.
+  template <uintptr_t V, uintptr_t... Vs>
+  struct Min : std::integral_constant<
+                   uintptr_t, (V < Min<Vs...>::value ? V : Min<Vs...>::value)> {
+  };
+  template <uintptr_t V>
+  struct Min<V> : std::integral_constant<uintptr_t, V> {};
+  enum { NumTagBits = Min<MemberTs::TraitsT::NumLowBitsAvailable...>::value };
+
+  // Also compute the smallest discriminant and various masks for convenience.
+  enum : uint64_t {
+    MinTag = Min<MemberTs::Tag...>::value,
+    PointerMask = static_cast<uint64_t>(-1) << NumTagBits,
+    TagMask = ~PointerMask
+  };
+
+  // Finally we need a recursive template to do static checks of each
+  // member.
+  template <typename MemberT, typename... InnerMemberTs>
+  struct Checker : Checker<InnerMemberTs...> {
+    static_assert(MemberT::Tag < (1 << NumTagBits),
+                  "This discriminant value requires too many bits!");
+  };
+  template <typename MemberT> struct Checker<MemberT> : std::true_type {
+    static_assert(MemberT::Tag < (1 << NumTagBits),
+                  "This discriminant value requires too many bits!");
+  };
+  static_assert(Checker<MemberTs...>::value,
+                "Each member must pass the checker.");
+};
+
+} // end namespace detail
+
+// Teach DenseMap how to use PointerSumTypes as keys.
+template <typename TagT, typename... MemberTs>
+struct DenseMapInfo<PointerSumType<TagT, MemberTs...>> {
+  using SumType = PointerSumType<TagT, MemberTs...>;
+  using HelperT = detail::PointerSumTypeHelper<TagT, MemberTs...>;
+  enum { SomeTag = HelperT::MinTag };
+  using SomePointerT =
+      typename HelperT::template Lookup<HelperT::MinTag>::PointerT;
+  using SomePointerInfo = DenseMapInfo<SomePointerT>;
+
+  static inline SumType getEmptyKey() {
+    return SumType::create<SomeTag>(SomePointerInfo::getEmptyKey());
+  }
+
+  static inline SumType getTombstoneKey() {
+    return SumType::create<SomeTag>(SomePointerInfo::getTombstoneKey());
+  }
+
+  static unsigned getHashValue(const SumType &Arg) {
+    uintptr_t OpaqueValue = Arg.getOpaqueValue();
+    return DenseMapInfo<uintptr_t>::getHashValue(OpaqueValue);
+  }
+
+  static bool isEqual(const SumType &LHS, const SumType &RHS) {
+    return LHS == RHS;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POINTERSUMTYPE_H
diff --git a/linux-x64/clang/include/llvm/ADT/PointerUnion.h b/linux-x64/clang/include/llvm/ADT/PointerUnion.h
new file mode 100644
index 0000000..315e583
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PointerUnion.h
@@ -0,0 +1,491 @@
+//===- llvm/ADT/PointerUnion.h - Discriminated Union of 2 Ptrs --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PointerUnion class, which is a discriminated union of
+// pointer types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERUNION_H
+#define LLVM_ADT_POINTERUNION_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+template <typename T> struct PointerUnionTypeSelectorReturn {
+  using Return = T;
+};
+
+/// Get a type based on whether two types are the same or not.
+///
+/// For:
+///
+/// \code
+///   using Ret = typename PointerUnionTypeSelector<T1, T2, EQ, NE>::Return;
+/// \endcode
+///
+/// Ret will be EQ type if T1 is same as T2 or NE type otherwise.
+template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
+struct PointerUnionTypeSelector {
+  using Return = typename PointerUnionTypeSelectorReturn<RET_NE>::Return;
+};
+
+template <typename T, typename RET_EQ, typename RET_NE>
+struct PointerUnionTypeSelector<T, T, RET_EQ, RET_NE> {
+  using Return = typename PointerUnionTypeSelectorReturn<RET_EQ>::Return;
+};
+
+template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
+struct PointerUnionTypeSelectorReturn<
+    PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>> {
+  using Return =
+      typename PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>::Return;
+};
+
+/// Provide PointerLikeTypeTraits for void*, as used by PointerUnion for its
+/// two template arguments.
+template <typename PT1, typename PT2> class PointerUnionUIntTraits {
+public:
+  static inline void *getAsVoidPointer(void *P) { return P; }
+  static inline void *getFromVoidPointer(void *P) { return P; }
+
+  enum {
+    PT1BitsAv = (int)(PointerLikeTypeTraits<PT1>::NumLowBitsAvailable),
+    PT2BitsAv = (int)(PointerLikeTypeTraits<PT2>::NumLowBitsAvailable),
+    NumLowBitsAvailable = PT1BitsAv < PT2BitsAv ? PT1BitsAv : PT2BitsAv
+  };
+};
+
+/// A discriminated union of two pointer types, with the discriminator in the
+/// low bit of the pointer.
+///
+/// This implementation is extremely efficient in space due to leveraging the
+/// low bits of the pointer, while exposing a natural and type-safe API.
+///
+/// Common use patterns would be something like this:
+///    PointerUnion<int*, float*> P;
+///    P = (int*)0;
+///    printf("%d %d", P.is<int*>(), P.is<float*>());  // prints "1 0"
+///    X = P.get<int*>();     // ok.
+///    Y = P.get<float*>();   // runtime assertion failure.
+///    Z = P.get<double*>();  // compile time failure.
+///    P = (float*)0;
+///    Y = P.get<float*>();   // ok.
+///    X = P.get<int*>();     // runtime assertion failure.
+template <typename PT1, typename PT2> class PointerUnion {
+public:
+  using ValTy =
+      PointerIntPair<void *, 1, bool, PointerUnionUIntTraits<PT1, PT2>>;
+
+private:
+  ValTy Val;
+
+  struct IsPT1 {
+    static const int Num = 0;
+  };
+  struct IsPT2 {
+    static const int Num = 1;
+  };
+  template <typename T> struct UNION_DOESNT_CONTAIN_TYPE {};
+
+public:
+  PointerUnion() = default;
+  PointerUnion(PT1 V)
+      : Val(const_cast<void *>(
+            PointerLikeTypeTraits<PT1>::getAsVoidPointer(V))) {}
+  PointerUnion(PT2 V)
+      : Val(const_cast<void *>(PointerLikeTypeTraits<PT2>::getAsVoidPointer(V)),
+            1) {}
+
+  /// Test if the pointer held in the union is null, regardless of
+  /// which type it is.
+  bool isNull() const {
+    // Convert from the void* to one of the pointer types, to make sure that
+    // we recursively strip off low bits if we have a nested PointerUnion.
+    return !PointerLikeTypeTraits<PT1>::getFromVoidPointer(Val.getPointer());
+  }
+
+  explicit operator bool() const { return !isNull(); }
+
+  /// Test if the Union currently holds the type matching T.
+  template <typename T> int is() const {
+    using Ty = typename ::llvm::PointerUnionTypeSelector<
+        PT1, T, IsPT1,
+        ::llvm::PointerUnionTypeSelector<PT2, T, IsPT2,
+                                         UNION_DOESNT_CONTAIN_TYPE<T>>>::Return;
+    int TyNo = Ty::Num;
+    return static_cast<int>(Val.getInt()) == TyNo;
+  }
+
+  /// Returns the value of the specified pointer type.
+  ///
+  /// If the specified pointer type is incorrect, assert.
+  template <typename T> T get() const {
+    assert(is<T>() && "Invalid accessor called");
+    return PointerLikeTypeTraits<T>::getFromVoidPointer(Val.getPointer());
+  }
+
+  /// Returns the current pointer if it is of the specified pointer type,
+  /// otherwise returns null.
+  template <typename T> T dyn_cast() const {
+    if (is<T>())
+      return get<T>();
+    return T();
+  }
+
+  /// If the union is set to the first pointer type get an address pointing to
+  /// it.
+  PT1 const *getAddrOfPtr1() const {
+    return const_cast<PointerUnion *>(this)->getAddrOfPtr1();
+  }
+
+  /// If the union is set to the first pointer type get an address pointing to
+  /// it.
+  PT1 *getAddrOfPtr1() {
+    assert(is<PT1>() && "Val is not the first pointer");
+    assert(
+        get<PT1>() == Val.getPointer() &&
+        "Can't get the address because PointerLikeTypeTraits changes the ptr");
+    return const_cast<PT1 *>(
+        reinterpret_cast<const PT1 *>(Val.getAddrOfPointer()));
+  }
+
+  /// Assignment from nullptr which just clears the union.
+  const PointerUnion &operator=(std::nullptr_t) {
+    Val.initWithPointer(nullptr);
+    return *this;
+  }
+
+  /// Assignment operators - Allow assigning into this union from either
+  /// pointer type, setting the discriminator to remember what it came from.
+  const PointerUnion &operator=(const PT1 &RHS) {
+    Val.initWithPointer(
+        const_cast<void *>(PointerLikeTypeTraits<PT1>::getAsVoidPointer(RHS)));
+    return *this;
+  }
+  const PointerUnion &operator=(const PT2 &RHS) {
+    Val.setPointerAndInt(
+        const_cast<void *>(PointerLikeTypeTraits<PT2>::getAsVoidPointer(RHS)),
+        1);
+    return *this;
+  }
+
+  void *getOpaqueValue() const { return Val.getOpaqueValue(); }
+  static inline PointerUnion getFromOpaqueValue(void *VP) {
+    PointerUnion V;
+    V.Val = ValTy::getFromOpaqueValue(VP);
+    return V;
+  }
+};
+
+template <typename PT1, typename PT2>
+bool operator==(PointerUnion<PT1, PT2> lhs, PointerUnion<PT1, PT2> rhs) {
+  return lhs.getOpaqueValue() == rhs.getOpaqueValue();
+}
+
+template <typename PT1, typename PT2>
+bool operator!=(PointerUnion<PT1, PT2> lhs, PointerUnion<PT1, PT2> rhs) {
+  return lhs.getOpaqueValue() != rhs.getOpaqueValue();
+}
+
+template <typename PT1, typename PT2>
+bool operator<(PointerUnion<PT1, PT2> lhs, PointerUnion<PT1, PT2> rhs) {
+  return lhs.getOpaqueValue() < rhs.getOpaqueValue();
+}
+
+// Teach SmallPtrSet that PointerUnion is "basically a pointer", with the
+// number of low bits available equal to min(PT1bits, PT2bits) - 1.
+template <typename PT1, typename PT2>
+struct PointerLikeTypeTraits<PointerUnion<PT1, PT2>> {
+  static inline void *getAsVoidPointer(const PointerUnion<PT1, PT2> &P) {
+    return P.getOpaqueValue();
+  }
+
+  static inline PointerUnion<PT1, PT2> getFromVoidPointer(void *P) {
+    return PointerUnion<PT1, PT2>::getFromOpaqueValue(P);
+  }
+
+  // The number of bits available is the min of the two pointer types.
+  enum {
+    NumLowBitsAvailable = PointerLikeTypeTraits<
+        typename PointerUnion<PT1, PT2>::ValTy>::NumLowBitsAvailable
+  };
+};
+
+/// A pointer union of three pointer types. See documentation for PointerUnion
+/// for usage.
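+///
+/// A minimal illustrative sketch (\c L is a local stand-in):
+/// \code
+///   long L;
+///   PointerUnion3<int *, float *, long *> P = &L;
+///   bool IsLong = P.is<long *>();    // true
+///   long *LP = P.dyn_cast<long *>(); // &L
+/// \endcode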
+template <typename PT1, typename PT2, typename PT3> class PointerUnion3 {
+public:
+  using InnerUnion = PointerUnion<PT1, PT2>;
+  using ValTy = PointerUnion<InnerUnion, PT3>;
+
+private:
+  ValTy Val;
+
+  struct IsInnerUnion {
+    ValTy Val;
+
+    IsInnerUnion(ValTy val) : Val(val) {}
+
+    template <typename T> int is() const {
+      return Val.template is<InnerUnion>() &&
+             Val.template get<InnerUnion>().template is<T>();
+    }
+
+    template <typename T> T get() const {
+      return Val.template get<InnerUnion>().template get<T>();
+    }
+  };
+
+  struct IsPT3 {
+    ValTy Val;
+
+    IsPT3(ValTy val) : Val(val) {}
+
+    template <typename T> int is() const { return Val.template is<T>(); }
+    template <typename T> T get() const { return Val.template get<T>(); }
+  };
+
+public:
+  PointerUnion3() = default;
+  PointerUnion3(PT1 V) { Val = InnerUnion(V); }
+  PointerUnion3(PT2 V) { Val = InnerUnion(V); }
+  PointerUnion3(PT3 V) { Val = V; }
+
+  /// Test if the pointer held in the union is null, regardless of
+  /// which type it is.
+  bool isNull() const { return Val.isNull(); }
+  explicit operator bool() const { return !isNull(); }
+
+  /// Test if the Union currently holds the type matching T.
+  template <typename T> int is() const {
+    // If T is PT1/PT2 choose IsInnerUnion otherwise choose IsPT3.
+    using Ty = typename ::llvm::PointerUnionTypeSelector<
+        PT1, T, IsInnerUnion,
+        ::llvm::PointerUnionTypeSelector<PT2, T, IsInnerUnion, IsPT3>>::Return;
+    return Ty(Val).template is<T>();
+  }
+
+  /// Returns the value of the specified pointer type.
+  ///
+  /// If the specified pointer type is incorrect, assert.
+  template <typename T> T get() const {
+    assert(is<T>() && "Invalid accessor called");
+    // If T is PT1/PT2 choose IsInnerUnion otherwise choose IsPT3.
+    using Ty = typename ::llvm::PointerUnionTypeSelector<
+        PT1, T, IsInnerUnion,
+        ::llvm::PointerUnionTypeSelector<PT2, T, IsInnerUnion, IsPT3>>::Return;
+    return Ty(Val).template get<T>();
+  }
+
+  /// Returns the current pointer if it is of the specified pointer type,
+  /// otherwise returns null.
+  template <typename T> T dyn_cast() const {
+    if (is<T>())
+      return get<T>();
+    return T();
+  }
+
+  /// Assignment from nullptr which just clears the union.
+  const PointerUnion3 &operator=(std::nullptr_t) {
+    Val = nullptr;
+    return *this;
+  }
+
+  /// Assignment operators - Allow assigning into this union from any
+  /// pointer type, setting the discriminator to remember what it came from.
+  const PointerUnion3 &operator=(const PT1 &RHS) {
+    Val = InnerUnion(RHS);
+    return *this;
+  }
+  const PointerUnion3 &operator=(const PT2 &RHS) {
+    Val = InnerUnion(RHS);
+    return *this;
+  }
+  const PointerUnion3 &operator=(const PT3 &RHS) {
+    Val = RHS;
+    return *this;
+  }
+
+  void *getOpaqueValue() const { return Val.getOpaqueValue(); }
+  static inline PointerUnion3 getFromOpaqueValue(void *VP) {
+    PointerUnion3 V;
+    V.Val = ValTy::getFromOpaqueValue(VP);
+    return V;
+  }
+};
+
+// Teach SmallPtrSet that PointerUnion3 is "basically a pointer", that has
+// # low bits available = min(PT1bits,PT2bits,PT3bits)-2.
+template <typename PT1, typename PT2, typename PT3>
+struct PointerLikeTypeTraits<PointerUnion3<PT1, PT2, PT3>> {
+  static inline void *getAsVoidPointer(const PointerUnion3<PT1, PT2, PT3> &P) {
+    return P.getOpaqueValue();
+  }
+
+  static inline PointerUnion3<PT1, PT2, PT3> getFromVoidPointer(void *P) {
+    return PointerUnion3<PT1, PT2, PT3>::getFromOpaqueValue(P);
+  }
+
+  // The number of bits available is the min of the pointer types.
+  enum {
+    NumLowBitsAvailable = PointerLikeTypeTraits<
+        typename PointerUnion3<PT1, PT2, PT3>::ValTy>::NumLowBitsAvailable
+  };
+};
+
+template <typename PT1, typename PT2, typename PT3>
+bool operator<(PointerUnion3<PT1, PT2, PT3> lhs,
+               PointerUnion3<PT1, PT2, PT3> rhs) {
+  return lhs.getOpaqueValue() < rhs.getOpaqueValue();
+}
+
+/// A pointer union of four pointer types. See documentation for PointerUnion
+/// for usage.
+template <typename PT1, typename PT2, typename PT3, typename PT4>
+class PointerUnion4 {
+public:
+  using InnerUnion1 = PointerUnion<PT1, PT2>;
+  using InnerUnion2 = PointerUnion<PT3, PT4>;
+  using ValTy = PointerUnion<InnerUnion1, InnerUnion2>;
+
+private:
+  ValTy Val;
+
+public:
+  PointerUnion4() = default;
+  PointerUnion4(PT1 V) { Val = InnerUnion1(V); }
+  PointerUnion4(PT2 V) { Val = InnerUnion1(V); }
+  PointerUnion4(PT3 V) { Val = InnerUnion2(V); }
+  PointerUnion4(PT4 V) { Val = InnerUnion2(V); }
+
+  /// Test if the pointer held in the union is null, regardless of
+  /// which type it is.
+  bool isNull() const { return Val.isNull(); }
+  explicit operator bool() const { return !isNull(); }
+
+  /// Test if the Union currently holds the type matching T.
+  template <typename T> int is() const {
+    // If T is PT1/PT2 choose InnerUnion1 otherwise choose InnerUnion2.
+    using Ty = typename ::llvm::PointerUnionTypeSelector<
+        PT1, T, InnerUnion1,
+        ::llvm::PointerUnionTypeSelector<PT2, T, InnerUnion1,
+                                         InnerUnion2>>::Return;
+    return Val.template is<Ty>() && Val.template get<Ty>().template is<T>();
+  }
+
+  /// Returns the value of the specified pointer type.
+  ///
+  /// If the specified pointer type is incorrect, assert.
+  template <typename T> T get() const {
+    assert(is<T>() && "Invalid accessor called");
+    // If T is PT1/PT2 choose InnerUnion1 otherwise choose InnerUnion2.
+    using Ty = typename ::llvm::PointerUnionTypeSelector<
+        PT1, T, InnerUnion1,
+        ::llvm::PointerUnionTypeSelector<PT2, T, InnerUnion1,
+                                         InnerUnion2>>::Return;
+    return Val.template get<Ty>().template get<T>();
+  }
+
+  /// Returns the current pointer if it is of the specified pointer type,
+  /// otherwise returns null.
+  template <typename T> T dyn_cast() const {
+    if (is<T>())
+      return get<T>();
+    return T();
+  }
+
+  /// Assignment from nullptr which just clears the union.
+  const PointerUnion4 &operator=(std::nullptr_t) {
+    Val = nullptr;
+    return *this;
+  }
+
+  /// Assignment operators - Allow assigning into this union from any
+  /// pointer type, setting the discriminator to remember what it came from.
+  const PointerUnion4 &operator=(const PT1 &RHS) {
+    Val = InnerUnion1(RHS);
+    return *this;
+  }
+  const PointerUnion4 &operator=(const PT2 &RHS) {
+    Val = InnerUnion1(RHS);
+    return *this;
+  }
+  const PointerUnion4 &operator=(const PT3 &RHS) {
+    Val = InnerUnion2(RHS);
+    return *this;
+  }
+  const PointerUnion4 &operator=(const PT4 &RHS) {
+    Val = InnerUnion2(RHS);
+    return *this;
+  }
+
+  void *getOpaqueValue() const { return Val.getOpaqueValue(); }
+  static inline PointerUnion4 getFromOpaqueValue(void *VP) {
+    PointerUnion4 V;
+    V.Val = ValTy::getFromOpaqueValue(VP);
+    return V;
+  }
+};
+
+// Teach SmallPtrSet that PointerUnion4 is "basically a pointer", that has
+// # low bits available = min(PT1bits,PT2bits,PT3bits,PT4bits)-2.
+template <typename PT1, typename PT2, typename PT3, typename PT4>
+struct PointerLikeTypeTraits<PointerUnion4<PT1, PT2, PT3, PT4>> {
+  static inline void *
+  getAsVoidPointer(const PointerUnion4<PT1, PT2, PT3, PT4> &P) {
+    return P.getOpaqueValue();
+  }
+
+  static inline PointerUnion4<PT1, PT2, PT3, PT4> getFromVoidPointer(void *P) {
+    return PointerUnion4<PT1, PT2, PT3, PT4>::getFromOpaqueValue(P);
+  }
+
+  // The number of bits available is the min of the pointer types.
+  enum {
+    NumLowBitsAvailable = PointerLikeTypeTraits<
+        typename PointerUnion4<PT1, PT2, PT3, PT4>::ValTy>::NumLowBitsAvailable
+  };
+};
+
+// Teach DenseMap how to use PointerUnions as keys.
+template <typename T, typename U> struct DenseMapInfo<PointerUnion<T, U>> {
+  using Pair = PointerUnion<T, U>;
+  using FirstInfo = DenseMapInfo<T>;
+  using SecondInfo = DenseMapInfo<U>;
+
+  static inline Pair getEmptyKey() { return Pair(FirstInfo::getEmptyKey()); }
+
+  static inline Pair getTombstoneKey() {
+    return Pair(FirstInfo::getTombstoneKey());
+  }
+
+  static unsigned getHashValue(const Pair &PairVal) {
+    intptr_t key = (intptr_t)PairVal.getOpaqueValue();
+    return DenseMapInfo<intptr_t>::getHashValue(key);
+  }
+
+  static bool isEqual(const Pair &LHS, const Pair &RHS) {
+    return LHS.template is<T>() == RHS.template is<T>() &&
+           (LHS.template is<T>() ? FirstInfo::isEqual(LHS.template get<T>(),
+                                                      RHS.template get<T>())
+                                 : SecondInfo::isEqual(LHS.template get<U>(),
+                                                       RHS.template get<U>()));
+  }
+};
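+
+// For example (an illustrative sketch), a PointerUnion can then serve
+// directly as a DenseMap key:
+//
+//   DenseMap<PointerUnion<int *, float *>, unsigned> Slots;
+//   int X;
+//   Slots[PointerUnion<int *, float *>(&X)] = 0;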
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POINTERUNION_H
diff --git a/linux-x64/clang/include/llvm/ADT/PostOrderIterator.h b/linux-x64/clang/include/llvm/ADT/PostOrderIterator.h
new file mode 100644
index 0000000..dc8a9b6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PostOrderIterator.h
@@ -0,0 +1,309 @@
+//===- llvm/ADT/PostOrderIterator.h - PostOrder iterator --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file builds on the ADT/GraphTraits.h file to build a generic graph
+// post order iterator.  This should work over any graph type that has a
+// GraphTraits specialization.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POSTORDERITERATOR_H
+#define LLVM_ADT_POSTORDERITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include <iterator>
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+// The po_iterator_storage template provides access to the set of already
+// visited nodes during the po_iterator's depth-first traversal.
+//
+// The default implementation simply contains a set of visited nodes, while
+// the External=true version uses a reference to an external set.
+//
+// It is possible to prune the depth-first traversal in several ways:
+//
+// - When providing an external set that already contains some graph nodes,
+//   those nodes won't be visited again. This is useful for restarting a
+//   post-order traversal on a graph with nodes that aren't dominated by a
+//   single node.
+//
+// - By providing a custom SetType class, unwanted graph nodes can be excluded
+//   by having the insert() function return false. This could for example
+//   confine a CFG traversal to blocks in a specific loop.
+//
+// - Finally, by specializing the po_iterator_storage template itself, graph
+//   edges can be pruned by returning false in the insertEdge() function. This
+//   could be used to remove loop back-edges from the CFG seen by po_iterator.
+//
+// A specialized po_iterator_storage class can observe both the pre-order and
+// the post-order. The insertEdge() function is called in a pre-order, while
+// the finishPostorder() function is called just before the po_iterator moves
+// on to the next node.
+
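+// For instance (an illustrative sketch; F is some Function, and the
+// GraphTraits specializations for Function/BasicBlock come from the IR
+// headers), a pre-seeded external set keeps those blocks from being visited:
+//
+//   SmallPtrSet<BasicBlock *, 8> Visited; // Pre-seed with blocks to skip.
+//   for (BasicBlock *BB : post_order_ext(&F, Visited))
+//     ...;
+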
+/// Default po_iterator_storage implementation with an internal set object.
+template<class SetType, bool External>
+class po_iterator_storage {
+  SetType Visited;
+
+public:
+  // Return true if edge destination should be visited.
+  template <typename NodeRef>
+  bool insertEdge(Optional<NodeRef> From, NodeRef To) {
+    return Visited.insert(To).second;
+  }
+
+  // Called after all children of BB have been visited.
+  template <typename NodeRef> void finishPostorder(NodeRef BB) {}
+};
+
+/// Specialization of po_iterator_storage that references an external set.
+template<class SetType>
+class po_iterator_storage<SetType, true> {
+  SetType &Visited;
+
+public:
+  po_iterator_storage(SetType &VSet) : Visited(VSet) {}
+  po_iterator_storage(const po_iterator_storage &S) : Visited(S.Visited) {}
+
+  // Return true if edge destination should be visited, called with an empty
+  // Optional<NodeRef> From for the root node.
+  // Graph edges can be pruned by specializing this function.
+  template <class NodeRef> bool insertEdge(Optional<NodeRef> From, NodeRef To) {
+    return Visited.insert(To).second;
+  }
+
+  // Called after all children of BB have been visited.
+  template <class NodeRef> void finishPostorder(NodeRef BB) {}
+};
+
+template <class GraphT,
+          class SetType =
+              SmallPtrSet<typename GraphTraits<GraphT>::NodeRef, 8>,
+          bool ExtStorage = false, class GT = GraphTraits<GraphT>>
+class po_iterator
+    : public std::iterator<std::forward_iterator_tag, typename GT::NodeRef>,
+      public po_iterator_storage<SetType, ExtStorage> {
+  using super = std::iterator<std::forward_iterator_tag, typename GT::NodeRef>;
+  using NodeRef = typename GT::NodeRef;
+  using ChildItTy = typename GT::ChildIteratorType;
+
+  // VisitStack - Used to maintain the ordering.  Top = current block
+  // First element is basic block pointer, second is the 'next child' to visit
+  std::vector<std::pair<NodeRef, ChildItTy>> VisitStack;
+
+  po_iterator(NodeRef BB) {
+    this->insertEdge(Optional<NodeRef>(), BB);
+    VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
+    traverseChild();
+  }
+
+  po_iterator() = default; // End is when stack is empty.
+
+  po_iterator(NodeRef BB, SetType &S)
+      : po_iterator_storage<SetType, ExtStorage>(S) {
+    if (this->insertEdge(Optional<NodeRef>(), BB)) {
+      VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
+      traverseChild();
+    }
+  }
+
+  po_iterator(SetType &S)
+      : po_iterator_storage<SetType, ExtStorage>(S) {
+  } // End is when stack is empty.
+
+  void traverseChild() {
+    while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
+      NodeRef BB = *VisitStack.back().second++;
+      if (this->insertEdge(Optional<NodeRef>(VisitStack.back().first), BB)) {
+        // If the block is not visited...
+        VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
+      }
+    }
+  }
+
+public:
+  using pointer = typename super::pointer;
+
+  // Provide static "constructors"...
+  static po_iterator begin(GraphT G) {
+    return po_iterator(GT::getEntryNode(G));
+  }
+  static po_iterator end(GraphT G) { return po_iterator(); }
+
+  static po_iterator begin(GraphT G, SetType &S) {
+    return po_iterator(GT::getEntryNode(G), S);
+  }
+  static po_iterator end(GraphT G, SetType &S) { return po_iterator(S); }
+
+  bool operator==(const po_iterator &x) const {
+    return VisitStack == x.VisitStack;
+  }
+  bool operator!=(const po_iterator &x) const { return !(*this == x); }
+
+  const NodeRef &operator*() const { return VisitStack.back().first; }
+
+  // This is a nonstandard operator-> that dereferences the pointer an extra
+  // time... so that you can actually call methods ON the BasicBlock, because
+  // the contained type is a pointer.  This allows, e.g., BBIt->getTerminator().
+  //
+  NodeRef operator->() const { return **this; }
+
+  po_iterator &operator++() { // Preincrement
+    this->finishPostorder(VisitStack.back().first);
+    VisitStack.pop_back();
+    if (!VisitStack.empty())
+      traverseChild();
+    return *this;
+  }
+
+  po_iterator operator++(int) { // Postincrement
+    po_iterator tmp = *this;
+    ++*this;
+    return tmp;
+  }
+};
+
+// Provide global constructors that automatically figure out correct types...
+//
+template <class T>
+po_iterator<T> po_begin(const T &G) { return po_iterator<T>::begin(G); }
+template <class T>
+po_iterator<T> po_end  (const T &G) { return po_iterator<T>::end(G); }
+
+template <class T> iterator_range<po_iterator<T>> post_order(const T &G) {
+  return make_range(po_begin(G), po_end(G));
+}
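+
+// For example (an illustrative sketch; F is some Function* and the
+// GraphTraits specialization for it comes from the IR headers):
+//
+//   for (BasicBlock *BB : post_order(F))
+//     ...; // Every successor of BB is visited before BB itself.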
+
+// Provide global definitions of external postorder iterators...
+template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>>
+struct po_ext_iterator : public po_iterator<T, SetType, true> {
+  po_ext_iterator(const po_iterator<T, SetType, true> &V) :
+  po_iterator<T, SetType, true>(V) {}
+};
+
+template<class T, class SetType>
+po_ext_iterator<T, SetType> po_ext_begin(T G, SetType &S) {
+  return po_ext_iterator<T, SetType>::begin(G, S);
+}
+
+template<class T, class SetType>
+po_ext_iterator<T, SetType> po_ext_end(T G, SetType &S) {
+  return po_ext_iterator<T, SetType>::end(G, S);
+}
+
+template <class T, class SetType>
+iterator_range<po_ext_iterator<T, SetType>> post_order_ext(const T &G, SetType &S) {
+  return make_range(po_ext_begin(G, S), po_ext_end(G, S));
+}
+
+// Provide global definitions of inverse post order iterators...
+template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>,
+          bool External = false>
+struct ipo_iterator : public po_iterator<Inverse<T>, SetType, External> {
+  ipo_iterator(const po_iterator<Inverse<T>, SetType, External> &V) :
+     po_iterator<Inverse<T>, SetType, External> (V) {}
+};
+
+template <class T>
+ipo_iterator<T> ipo_begin(const T &G) {
+  return ipo_iterator<T>::begin(G);
+}
+
+template <class T>
+ipo_iterator<T> ipo_end(const T &G){
+  return ipo_iterator<T>::end(G);
+}
+
+template <class T>
+iterator_range<ipo_iterator<T>> inverse_post_order(const T &G) {
+  return make_range(ipo_begin(G), ipo_end(G));
+}
+
+// Provide global definitions of external inverse postorder iterators...
+template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>>
+struct ipo_ext_iterator : public ipo_iterator<T, SetType, true> {
+  ipo_ext_iterator(const ipo_iterator<T, SetType, true> &V) :
+    ipo_iterator<T, SetType, true>(V) {}
+  ipo_ext_iterator(const po_iterator<Inverse<T>, SetType, true> &V) :
+    ipo_iterator<T, SetType, true>(V) {}
+};
+
+template <class T, class SetType>
+ipo_ext_iterator<T, SetType> ipo_ext_begin(const T &G, SetType &S) {
+  return ipo_ext_iterator<T, SetType>::begin(G, S);
+}
+
+template <class T, class SetType>
+ipo_ext_iterator<T, SetType> ipo_ext_end(const T &G, SetType &S) {
+  return ipo_ext_iterator<T, SetType>::end(G, S);
+}
+
+template <class T, class SetType>
+iterator_range<ipo_ext_iterator<T, SetType>>
+inverse_post_order_ext(const T &G, SetType &S) {
+  return make_range(ipo_ext_begin(G, S), ipo_ext_end(G, S));
+}
+
+//===--------------------------------------------------------------------===//
+// Reverse Post Order CFG iterator code
+//===--------------------------------------------------------------------===//
+//
+// This is used to visit basic blocks in a method in reverse post order.  This
+// class is awkward to use because I don't know a good incremental algorithm to
+// compute RPO from a graph.  Because of this, the construction of the
+// ReversePostOrderTraversal object is expensive (it must walk the entire graph
+// with a postorder iterator to build the data structures).  The moral of this
+// story is: Don't create more ReversePostOrderTraversal classes than necessary.
+//
+// Because it does the traversal in its constructor, it won't invalidate when
+// BasicBlocks are removed, *but* it may contain erased blocks. Some places
+// rely on this behavior (e.g. GVN).
+//
+// This class should be used like this:
+// {
+//   ReversePostOrderTraversal<Function*> RPOT(FuncPtr); // Expensive to create
+//   for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) {
+//      ...
+//   }
+//   for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) {
+//      ...
+//   }
+// }
+//
+
+template<class GraphT, class GT = GraphTraits<GraphT>>
+class ReversePostOrderTraversal {
+  using NodeRef = typename GT::NodeRef;
+
+  std::vector<NodeRef> Blocks; // Block list in normal PO order
+
+  void Initialize(NodeRef BB) {
+    std::copy(po_begin(BB), po_end(BB), std::back_inserter(Blocks));
+  }
+
+public:
+  using rpo_iterator = typename std::vector<NodeRef>::reverse_iterator;
+
+  ReversePostOrderTraversal(GraphT G) { Initialize(GT::getEntryNode(G)); }
+
+  // Because we want a reverse post order, use reverse iterators from the vector
+  rpo_iterator begin() { return Blocks.rbegin(); }
+  rpo_iterator end() { return Blocks.rend(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POSTORDERITERATOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/PriorityQueue.h b/linux-x64/clang/include/llvm/ADT/PriorityQueue.h
new file mode 100644
index 0000000..8ba871e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PriorityQueue.h
@@ -0,0 +1,83 @@
+//===- llvm/ADT/PriorityQueue.h - Priority queues ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PriorityQueue class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_PRIORITYQUEUE_H
+#define LLVM_ADT_PRIORITYQUEUE_H
+
+#include "llvm/ADT/STLExtras.h"
+#include <algorithm>
+#include <queue>
+
+namespace llvm {
+
+/// PriorityQueue - This class behaves like std::priority_queue and
+/// provides a few additional convenience functions.
+///
+template<class T,
+         class Sequence = std::vector<T>,
+         class Compare = std::less<typename Sequence::value_type> >
+class PriorityQueue : public std::priority_queue<T, Sequence, Compare> {
+public:
+  explicit PriorityQueue(const Compare &compare = Compare(),
+                         const Sequence &sequence = Sequence())
+    : std::priority_queue<T, Sequence, Compare>(compare, sequence)
+  {}
+
+  template<class Iterator>
+  PriorityQueue(Iterator begin, Iterator end,
+                const Compare &compare = Compare(),
+                const Sequence &sequence = Sequence())
+    : std::priority_queue<T, Sequence, Compare>(begin, end, compare, sequence)
+  {}
+
+  /// erase_one - Erase one element from the queue, regardless of its
+  /// position. This operation performs a linear search to find an element
+  /// equal to t, but then uses only logarithmic-time algorithms to do
+  /// the erase operation.
+  ///
+  void erase_one(const T &t) {
+    // Linear-search to find the element.
+    typename Sequence::size_type i = find(this->c, t) - this->c.begin();
+
+    // Logarithmic-time heap bubble-up.
+    while (i != 0) {
+      typename Sequence::size_type parent = (i - 1) / 2;
+      this->c[i] = this->c[parent];
+      i = parent;
+    }
+
+    // The element we want to remove is now at the root, so we can use
+    // priority_queue's plain pop to remove it.
+    this->pop();
+  }
+
+  /// reheapify - If an element in the queue has changed in a way that
+  /// affects its standing in the comparison function, the queue's
+  /// internal state becomes invalid. Calling reheapify() resets the
+  /// queue's state, making it valid again. This operation has time
+  /// complexity proportional to the number of elements in the queue,
+  /// so don't plan to use it a lot.
+  ///
+  void reheapify() {
+    std::make_heap(this->c.begin(), this->c.end(), this->comp);
+  }
+
+  /// clear - Erase all elements from the queue.
+  ///
+  void clear() {
+    this->c.clear();
+  }
+};
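+
+// Example (an illustrative sketch):
+//
+//   PriorityQueue<int> Q;
+//   Q.push(3); Q.push(1); Q.push(4);
+//   Q.erase_one(3); // Q now holds {1, 4} and Q.top() == 4.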
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/PriorityWorklist.h b/linux-x64/clang/include/llvm/ADT/PriorityWorklist.h
new file mode 100644
index 0000000..aa531f3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/PriorityWorklist.h
@@ -0,0 +1,266 @@
+//===- PriorityWorklist.h - Worklist with insertion priority ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+///
+/// This file provides a priority worklist. See the class comments for details.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_PRIORITYWORKLIST_H
+#define LLVM_ADT_PRIORITYWORKLIST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <vector>
+
+namespace llvm {
+
+/// A FILO worklist that prioritizes on re-insertion without duplication.
+///
+/// This is very similar to a \c SetVector with the primary difference that
+/// while re-insertion does not create a duplicate, it does adjust the
+/// visitation order to respect the last insertion point. This can be useful
+/// when the visit order needs to be prioritized based on insertion point
+/// without actually having duplicate visits.
+///
+/// Note that this doesn't prevent re-insertion of elements which have been
+/// visited -- if you need to break cycles, a set will still be necessary.
+///
+/// The type \c T must be default constructible to a null value that will be
+/// ignored. It is an error to insert such a value, and popping elements will
+/// never produce such a value. It is expected to be used with common nullable
+/// types like pointers or optionals.
+///
+/// Internally this uses a vector to store the worklist and a map to identify
+/// existing elements in the worklist. Both of these may be customized, but the
+/// map must support the basic DenseMap API for mapping from a T to an integer
+/// index into the vector.
+///
+/// A partial specialization is provided to automatically select a SmallVector
+/// and a SmallDenseMap if custom data structures are not provided.
+template <typename T, typename VectorT = std::vector<T>,
+          typename MapT = DenseMap<T, ptrdiff_t>>
+class PriorityWorklist {
+public:
+  using value_type = T;
+  using key_type = T;
+  using reference = T&;
+  using const_reference = const T&;
+  using size_type = typename MapT::size_type;
+
+  /// Construct an empty PriorityWorklist
+  PriorityWorklist() = default;
+
+  /// Determine if the PriorityWorklist is empty or not.
+  bool empty() const {
+    return V.empty();
+  }
+
+  /// Returns the number of elements in the worklist.
+  size_type size() const {
+    return M.size();
+  }
+
+  /// Count the number of elements of a given key in the PriorityWorklist.
+  /// \returns 0 if the element is not in the PriorityWorklist, 1 if it is.
+  size_type count(const key_type &key) const {
+    return M.count(key);
+  }
+
+  /// Return the last element of the PriorityWorklist.
+  const T &back() const {
+    assert(!empty() && "Cannot call back() on empty PriorityWorklist!");
+    return V.back();
+  }
+
+  /// Insert a new element into the PriorityWorklist.
+  /// \returns true if the element was inserted into the PriorityWorklist.
+  bool insert(const T &X) {
+    assert(X != T() && "Cannot insert a null (default constructed) value!");
+    auto InsertResult = M.insert({X, V.size()});
+    if (InsertResult.second) {
+      // Fresh value, just append it to the vector.
+      V.push_back(X);
+      return true;
+    }
+
+    auto &Index = InsertResult.first->second;
+    assert(V[Index] == X && "Value not actually at index in map!");
+    if (Index != (ptrdiff_t)(V.size() - 1)) {
+      // If the element isn't at the back, null it out and append a fresh one.
+      V[Index] = T();
+      Index = (ptrdiff_t)V.size();
+      V.push_back(X);
+    }
+    return false;
+  }
+
+  /// Insert a sequence of new elements into the PriorityWorklist.
+  template <typename SequenceT>
+  typename std::enable_if<!std::is_convertible<SequenceT, T>::value>::type
+  insert(SequenceT &&Input) {
+    if (std::begin(Input) == std::end(Input))
+      // Nothing to do for an empty input sequence.
+      return;
+
+    // First pull the input sequence into the vector as a bulk append
+    // operation.
+    ptrdiff_t StartIndex = V.size();
+    V.insert(V.end(), std::begin(Input), std::end(Input));
+    // Now walk backwards fixing up the index map and deleting any duplicates.
+    for (ptrdiff_t i = V.size() - 1; i >= StartIndex; --i) {
+      auto InsertResult = M.insert({V[i], i});
+      if (InsertResult.second)
+        continue;
+
+      // If the existing index is before this insert's start, nuke that one and
+      // move it up.
+      ptrdiff_t &Index = InsertResult.first->second;
+      if (Index < StartIndex) {
+        V[Index] = T();
+        Index = i;
+        continue;
+      }
+
+      // Otherwise the existing one comes first so just clear out the value in
+      // this slot.
+      V[i] = T();
+    }
+  }
+
+  /// Remove the last element of the PriorityWorklist.
+  void pop_back() {
+    assert(!empty() && "Cannot remove an element when empty!");
+    assert(back() != T() && "Cannot have a null element at the back!");
+    M.erase(back());
+    do {
+      V.pop_back();
+    } while (!V.empty() && V.back() == T());
+  }
+
+  LLVM_NODISCARD T pop_back_val() {
+    T Ret = back();
+    pop_back();
+    return Ret;
+  }
+
+  /// Erase an item from the worklist.
+  ///
+  /// Note that this is constant time due to the nature of the worklist
+  /// implementation.
+  bool erase(const T& X) {
+    auto I = M.find(X);
+    if (I == M.end())
+      return false;
+
+    assert(V[I->second] == X && "Value not actually at index in map!");
+    if (I->second == (ptrdiff_t)(V.size() - 1)) {
+      do {
+        V.pop_back();
+      } while (!V.empty() && V.back() == T());
+    } else {
+      V[I->second] = T();
+    }
+    M.erase(I);
+    return true;
+  }
+
+  /// Erase items from the set vector based on a predicate function.
+  ///
+  /// This is intended to be equivalent to the following code, if we could
+  /// write it:
+  ///
+  /// \code
+  ///   V.erase(remove_if(V, P), V.end());
+  /// \endcode
+  ///
+  /// However, PriorityWorklist doesn't expose non-const iterators, making any
+  /// algorithm like remove_if impossible to use.
+  ///
+  /// \returns true if any element is removed.
+  template <typename UnaryPredicate>
+  bool erase_if(UnaryPredicate P) {
+    typename VectorT::iterator E =
+        remove_if(V, TestAndEraseFromMap<UnaryPredicate>(P, M));
+    if (E == V.end())
+      return false;
+    for (auto I = V.begin(); I != E; ++I)
+      if (*I != T())
+        M[*I] = I - V.begin();
+    V.erase(E, V.end());
+    return true;
+  }
+
+  /// Completely clear the PriorityWorklist
+  void clear() {
+    M.clear();
+    V.clear();
+  }
+
+private:
+  /// A wrapper predicate designed for use with std::remove_if.
+  ///
+  /// This predicate wraps a predicate suitable for use with std::remove_if to
+  /// call M.erase(x) on each element which is slated for removal. This just
+  /// allows the predicate to be move only which we can't do with lambdas
+  /// today.
+  template <typename UnaryPredicateT>
+  class TestAndEraseFromMap {
+    UnaryPredicateT P;
+    MapT &M;
+
+  public:
+    TestAndEraseFromMap(UnaryPredicateT P, MapT &M)
+        : P(std::move(P)), M(M) {}
+
+    bool operator()(const T &Arg) {
+      if (Arg == T())
+        // Skip null values in the PriorityWorklist.
+        return false;
+
+      if (P(Arg)) {
+        M.erase(Arg);
+        return true;
+      }
+      return false;
+    }
+  };
+
+  /// The map from value to index in the vector.
+  MapT M;
+
+  /// The vector of elements in insertion order.
+  VectorT V;
+};
+
+/// A version of \c PriorityWorklist that selects small size optimized data
+/// structures for the vector and map.
+template <typename T, unsigned N>
+class SmallPriorityWorklist
+    : public PriorityWorklist<T, SmallVector<T, N>,
+                              SmallDenseMap<T, ptrdiff_t>> {
+public:
+  SmallPriorityWorklist() = default;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_PRIORITYWORKLIST_H
diff --git a/linux-x64/clang/include/llvm/ADT/SCCIterator.h b/linux-x64/clang/include/llvm/ADT/SCCIterator.h
new file mode 100644
index 0000000..784a58d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SCCIterator.h
@@ -0,0 +1,237 @@
+//===- ADT/SCCIterator.h - Strongly Connected Comp. Iter. -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This builds on the llvm/ADT/GraphTraits.h file to find the strongly
+/// connected components (SCCs) of a graph in O(N+E) time using Tarjan's DFS
+/// algorithm.
+///
+/// The SCC iterator has the important property that if a node in SCC S1 has an
+/// edge to a node in SCC S2, then it visits S1 *after* S2.
+///
+/// To visit S1 *before* S2, use the scc_iterator on the Inverse graph. (NOTE:
+/// This requires some simple wrappers and is not supported yet.)
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SCCITERATOR_H
+#define LLVM_ADT_SCCITERATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/iterator.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <vector>
+
+namespace llvm {
+
+/// \brief Enumerate the SCCs of a directed graph in reverse topological order
+/// of the SCC DAG.
+///
+/// This is implemented using Tarjan's DFS algorithm using an internal stack to
+/// build up a vector of nodes in a particular SCC. Note that it is a forward
+/// iterator and thus you cannot backtrack or re-visit nodes.
+template <class GraphT, class GT = GraphTraits<GraphT>>
+class scc_iterator : public iterator_facade_base<
+                         scc_iterator<GraphT, GT>, std::forward_iterator_tag,
+                         const std::vector<typename GT::NodeRef>, ptrdiff_t> {
+  using NodeRef = typename GT::NodeRef;
+  using ChildItTy = typename GT::ChildIteratorType;
+  using SccTy = std::vector<NodeRef>;
+  using reference = typename scc_iterator::reference;
+
+  /// Element of VisitStack during DFS.
+  struct StackElement {
+    NodeRef Node;         ///< The current node pointer.
+    ChildItTy NextChild;  ///< The next child, modified inplace during DFS.
+    unsigned MinVisited;  ///< Minimum uplink value of all children of Node.
+
+    StackElement(NodeRef Node, const ChildItTy &Child, unsigned Min)
+        : Node(Node), NextChild(Child), MinVisited(Min) {}
+
+    bool operator==(const StackElement &Other) const {
+      return Node == Other.Node &&
+             NextChild == Other.NextChild &&
+             MinVisited == Other.MinVisited;
+    }
+  };
+
+  /// The visit counters used to detect when a complete SCC is on the stack.
+  /// visitNum is the global counter.
+  ///
+  /// nodeVisitNumbers are per-node visit numbers, also used as DFS flags.
+  unsigned visitNum;
+  DenseMap<NodeRef, unsigned> nodeVisitNumbers;
+
+  /// Stack holding nodes of the SCC.
+  std::vector<NodeRef> SCCNodeStack;
+
+  /// The current SCC, retrieved using operator*().
+  SccTy CurrentSCC;
+
+  /// DFS stack, used to maintain the ordering.  The top contains the current
+  /// node, the next child to visit, and the minimum uplink value of all
+  /// the children visited so far.
+  std::vector<StackElement> VisitStack;
+
+  /// A single "visit" within the non-recursive DFS traversal.
+  void DFSVisitOne(NodeRef N);
+
+  /// The stack-based DFS traversal; defined below.
+  void DFSVisitChildren();
+
+  /// Compute the next SCC using the DFS traversal.
+  void GetNextSCC();
+
+  scc_iterator(NodeRef entryN) : visitNum(0) {
+    DFSVisitOne(entryN);
+    GetNextSCC();
+  }
+
+  /// End is when the DFS stack is empty.
+  scc_iterator() = default;
+
+public:
+  static scc_iterator begin(const GraphT &G) {
+    return scc_iterator(GT::getEntryNode(G));
+  }
+  static scc_iterator end(const GraphT &) { return scc_iterator(); }
+
+  /// \brief Direct loop termination test which is more efficient than
+  /// comparison with \c end().
+  bool isAtEnd() const {
+    assert(!CurrentSCC.empty() || VisitStack.empty());
+    return CurrentSCC.empty();
+  }
+
+  bool operator==(const scc_iterator &x) const {
+    return VisitStack == x.VisitStack && CurrentSCC == x.CurrentSCC;
+  }
+
+  scc_iterator &operator++() {
+    GetNextSCC();
+    return *this;
+  }
+
+  reference operator*() const {
+    assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
+    return CurrentSCC;
+  }
+
+  /// \brief Test if the current SCC has a loop.
+  ///
+  /// If the SCC has more than one node, this is trivially true.  If not, it may
+  /// still contain a loop if the node has an edge back to itself.
+  bool hasLoop() const;
+
+  /// This informs the \c scc_iterator that the specified \c Old node
+  /// has been deleted, and \c New is to be used in its place.
+  void ReplaceNode(NodeRef Old, NodeRef New) {
+    assert(nodeVisitNumbers.count(Old) && "Old not in scc_iterator?");
+    nodeVisitNumbers[New] = nodeVisitNumbers[Old];
+    nodeVisitNumbers.erase(Old);
+  }
+};
+
+template <class GraphT, class GT>
+void scc_iterator<GraphT, GT>::DFSVisitOne(NodeRef N) {
+  ++visitNum;
+  nodeVisitNumbers[N] = visitNum;
+  SCCNodeStack.push_back(N);
+  VisitStack.push_back(StackElement(N, GT::child_begin(N), visitNum));
+#if 0 // Enable if needed when debugging.
+  dbgs() << "TarjanSCC: Node " << N <<
+        " : visitNum = " << visitNum << "\n";
+#endif
+}
+
+template <class GraphT, class GT>
+void scc_iterator<GraphT, GT>::DFSVisitChildren() {
+  assert(!VisitStack.empty());
+  while (VisitStack.back().NextChild != GT::child_end(VisitStack.back().Node)) {
+    // TOS has at least one more child so continue DFS
+    NodeRef childN = *VisitStack.back().NextChild++;
+    typename DenseMap<NodeRef, unsigned>::iterator Visited =
+        nodeVisitNumbers.find(childN);
+    if (Visited == nodeVisitNumbers.end()) {
+      // this node has never been seen.
+      DFSVisitOne(childN);
+      continue;
+    }
+
+    unsigned childNum = Visited->second;
+    if (VisitStack.back().MinVisited > childNum)
+      VisitStack.back().MinVisited = childNum;
+  }
+}
+
+template <class GraphT, class GT> void scc_iterator<GraphT, GT>::GetNextSCC() {
+  CurrentSCC.clear(); // Prepare to compute the next SCC
+  while (!VisitStack.empty()) {
+    DFSVisitChildren();
+
+    // Pop the leaf on top of the VisitStack.
+    NodeRef visitingN = VisitStack.back().Node;
+    unsigned minVisitNum = VisitStack.back().MinVisited;
+    assert(VisitStack.back().NextChild == GT::child_end(visitingN));
+    VisitStack.pop_back();
+
+    // Propagate MinVisitNum to parent so we can detect the SCC starting node.
+    if (!VisitStack.empty() && VisitStack.back().MinVisited > minVisitNum)
+      VisitStack.back().MinVisited = minVisitNum;
+
+#if 0 // Enable if needed when debugging.
+    dbgs() << "TarjanSCC: Popped node " << visitingN <<
+          " : minVisitNum = " << minVisitNum << "; Node visit num = " <<
+          nodeVisitNumbers[visitingN] << "\n";
+#endif
+
+    if (minVisitNum != nodeVisitNumbers[visitingN])
+      continue;
+
+    // A full SCC is on the SCCNodeStack!  It includes all nodes below
+    // visitingN on the stack.  Copy those nodes to CurrentSCC,
+    // reset their minVisit values, and return (this suspends
+    // the DFS traversal till the next ++).
+    do {
+      CurrentSCC.push_back(SCCNodeStack.back());
+      SCCNodeStack.pop_back();
+      nodeVisitNumbers[CurrentSCC.back()] = ~0U;
+    } while (CurrentSCC.back() != visitingN);
+    return;
+  }
+}
+
+template <class GraphT, class GT>
+bool scc_iterator<GraphT, GT>::hasLoop() const {
+  assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
+  if (CurrentSCC.size() > 1)
+    return true;
+  NodeRef N = CurrentSCC.front();
+  for (ChildItTy CI = GT::child_begin(N), CE = GT::child_end(N); CI != CE;
+       ++CI)
+    if (*CI == N)
+      return true;
+  return false;
+}
+
+/// \brief Construct the begin iterator for a deduced graph type T.
+template <class T> scc_iterator<T> scc_begin(const T &G) {
+  return scc_iterator<T>::begin(G);
+}
+
+/// \brief Construct the end iterator for a deduced graph type T.
+template <class T> scc_iterator<T> scc_end(const T &G) {
+  return scc_iterator<T>::end(G);
+}
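+
+// Example (an illustrative sketch; F is some Function* and the GraphTraits
+// specialization for it comes from the IR headers):
+//
+//   for (scc_iterator<Function *> I = scc_begin(F); !I.isAtEnd(); ++I) {
+//     const std::vector<BasicBlock *> &SCC = *I;
+//     if (I.hasLoop())
+//       ...; // This SCC contains a cycle.
+//   }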
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SCCITERATOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/STLExtras.h b/linux-x64/clang/include/llvm/ADT/STLExtras.h
new file mode 100644
index 0000000..051b900
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/STLExtras.h
@@ -0,0 +1,1181 @@
+//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some templates that are useful if you are working with the
+// STL at all.
+//
+// No library is required when using these functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STLEXTRAS_H
+#define LLVM_ADT_STLEXTRAS_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#ifdef EXPENSIVE_CHECKS
+#include <random> // for std::mt19937
+#endif
+
+namespace llvm {
+
+// Only used by compiler if both template types are the same.  Useful when
+// using SFINAE to test for the existence of member functions.
+template <typename T, T> struct SameType;
+
+namespace detail {
+
+template <typename RangeT>
+using IterOfRange = decltype(std::begin(std::declval<RangeT &>()));
+
+template <typename RangeT>
+using ValueOfRange = typename std::remove_reference<decltype(
+    *std::begin(std::declval<RangeT &>()))>::type;
+
+} // end namespace detail
+
+//===----------------------------------------------------------------------===//
+//     Extra additions to <functional>
+//===----------------------------------------------------------------------===//
+
+template <class Ty> struct identity {
+  using argument_type = Ty;
+
+  Ty &operator()(Ty &self) const {
+    return self;
+  }
+  const Ty &operator()(const Ty &self) const {
+    return self;
+  }
+};
+
+template <class Ty> struct less_ptr {
+  bool operator()(const Ty* left, const Ty* right) const {
+    return *left < *right;
+  }
+};
+
+template <class Ty> struct greater_ptr {
+  bool operator()(const Ty* left, const Ty* right) const {
+    return *right < *left;
+  }
+};
+
+/// An efficient, type-erasing, non-owning reference to a callable. This is
+/// intended for use as the type of a function parameter that is not used
+/// after the function in question returns.
+///
+/// This class does not own the callable, so it is not in general safe to store
+/// a function_ref.
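+///
+/// An illustrative sketch:
+/// \code
+///   void visit(function_ref<void(int)> Callback) { Callback(42); }
+///   ...
+///   int Sum = 0;
+///   visit([&](int N) { Sum += N; }); // Safe: the lambda outlives the call.
+/// \endcode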
+template<typename Fn> class function_ref;
+
+template<typename Ret, typename ...Params>
+class function_ref<Ret(Params...)> {
+  Ret (*callback)(intptr_t callable, Params ...params) = nullptr;
+  intptr_t callable;
+
+  template<typename Callable>
+  static Ret callback_fn(intptr_t callable, Params ...params) {
+    return (*reinterpret_cast<Callable*>(callable))(
+        std::forward<Params>(params)...);
+  }
+
+public:
+  function_ref() = default;
+  function_ref(std::nullptr_t) {}
+
+  template <typename Callable>
+  function_ref(Callable &&callable,
+               typename std::enable_if<
+                   !std::is_same<typename std::remove_reference<Callable>::type,
+                                 function_ref>::value>::type * = nullptr)
+      : callback(callback_fn<typename std::remove_reference<Callable>::type>),
+        callable(reinterpret_cast<intptr_t>(&callable)) {}
+
+  Ret operator()(Params ...params) const {
+    return callback(callable, std::forward<Params>(params)...);
+  }
+
+  operator bool() const { return callback; }
+};
+
+// deleter - Very very very simple method that is used to invoke operator
+// delete on something.  It is used like this:
+//
+//   for_each(V.begin(), V.end(), deleter<Interval>);
+template <class T>
+inline void deleter(T *Ptr) {
+  delete Ptr;
+}
+
+//===----------------------------------------------------------------------===//
+//     Extra additions to <iterator>
+//===----------------------------------------------------------------------===//
+
+namespace adl_detail {
+
+using std::begin;
+
+template <typename ContainerTy>
+auto adl_begin(ContainerTy &&container)
+    -> decltype(begin(std::forward<ContainerTy>(container))) {
+  return begin(std::forward<ContainerTy>(container));
+}
+
+using std::end;
+
+template <typename ContainerTy>
+auto adl_end(ContainerTy &&container)
+    -> decltype(end(std::forward<ContainerTy>(container))) {
+  return end(std::forward<ContainerTy>(container));
+}
+
+using std::swap;
+
+template <typename T>
+void adl_swap(T &&lhs, T &&rhs) noexcept(noexcept(swap(std::declval<T>(),
+                                                       std::declval<T>()))) {
+  swap(std::forward<T>(lhs), std::forward<T>(rhs));
+}
+
+} // end namespace adl_detail
+
+template <typename ContainerTy>
+auto adl_begin(ContainerTy &&container)
+    -> decltype(adl_detail::adl_begin(std::forward<ContainerTy>(container))) {
+  return adl_detail::adl_begin(std::forward<ContainerTy>(container));
+}
+
+template <typename ContainerTy>
+auto adl_end(ContainerTy &&container)
+    -> decltype(adl_detail::adl_end(std::forward<ContainerTy>(container))) {
+  return adl_detail::adl_end(std::forward<ContainerTy>(container));
+}
+
+template <typename T>
+void adl_swap(T &&lhs, T &&rhs) noexcept(
+    noexcept(adl_detail::adl_swap(std::declval<T>(), std::declval<T>()))) {
+  adl_detail::adl_swap(std::forward<T>(lhs), std::forward<T>(rhs));
+}
+
+// mapped_iterator - This is a simple iterator adapter that causes a function to
+// be applied whenever operator* is invoked on the iterator.
+
+template <typename ItTy, typename FuncTy,
+          typename FuncReturnTy =
+            decltype(std::declval<FuncTy>()(*std::declval<ItTy>()))>
+class mapped_iterator
+    : public iterator_adaptor_base<
+             mapped_iterator<ItTy, FuncTy>, ItTy,
+             typename std::iterator_traits<ItTy>::iterator_category,
+             typename std::remove_reference<FuncReturnTy>::type> {
+public:
+  mapped_iterator(ItTy U, FuncTy F)
+    : mapped_iterator::iterator_adaptor_base(std::move(U)), F(std::move(F)) {}
+
+  ItTy getCurrent() { return this->I; }
+
+  FuncReturnTy operator*() { return F(*this->I); }
+
+private:
+  FuncTy F;
+};
+
+// map_iterator - Provide a convenient way to create mapped_iterators, just like
+// make_pair is useful for creating pairs...
+template <class ItTy, class FuncTy>
+inline mapped_iterator<ItTy, FuncTy> map_iterator(ItTy I, FuncTy F) {
+  return mapped_iterator<ItTy, FuncTy>(std::move(I), std::move(F));
+}
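+
+// For example (an illustrative sketch):
+//
+//   int A[] = {1, 2, 3};
+//   auto I = map_iterator(std::begin(A), [](int N) { return N * 2; });
+//   int First = *I; // 2; after ++I, *I yields 4.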
+
+/// Helper to determine if type T has a member called rbegin().
+template <typename Ty> class has_rbegin_impl {
+  using yes = char[1];
+  using no = char[2];
+
+  template <typename Inner>
+  static yes& test(Inner *I, decltype(I->rbegin()) * = nullptr);
+
+  template <typename>
+  static no& test(...);
+
+public:
+  static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
+};
+
+/// Metafunction to determine if T& or T has a member called rbegin().
+template <typename Ty>
+struct has_rbegin : has_rbegin_impl<typename std::remove_reference<Ty>::type> {
+};
+
+// Returns an iterator_range over the given container which iterates in reverse.
+// Note that the container must have rbegin()/rend() methods for this to work.
+template <typename ContainerTy>
+auto reverse(ContainerTy &&C,
+             typename std::enable_if<has_rbegin<ContainerTy>::value>::type * =
+                 nullptr) -> decltype(make_range(C.rbegin(), C.rend())) {
+  return make_range(C.rbegin(), C.rend());
+}
+
+// Returns a std::reverse_iterator wrapped around the given iterator.
+template <typename IteratorTy>
+std::reverse_iterator<IteratorTy> make_reverse_iterator(IteratorTy It) {
+  return std::reverse_iterator<IteratorTy>(It);
+}
+
+// Returns an iterator_range over the given container which iterates in reverse.
+// Note that the container must have begin()/end() methods which return
+// bidirectional iterators for this to work.
+template <typename ContainerTy>
+auto reverse(
+    ContainerTy &&C,
+    typename std::enable_if<!has_rbegin<ContainerTy>::value>::type * = nullptr)
+    -> decltype(make_range(llvm::make_reverse_iterator(std::end(C)),
+                           llvm::make_reverse_iterator(std::begin(C)))) {
+  return make_range(llvm::make_reverse_iterator(std::end(C)),
+                    llvm::make_reverse_iterator(std::begin(C)));
+}
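+
+// For example (an illustrative sketch):
+//
+//   std::vector<int> V = {1, 2, 3};
+//   for (int N : reverse(V))
+//     ...; // Visits 3, then 2, then 1.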
+
+/// An iterator adaptor that filters the elements of given inner iterators.
+///
+/// The predicate parameter should be a callable object that accepts the wrapped
+/// iterator's reference type and returns a bool. When incrementing or
+/// decrementing the iterator, it will call the predicate on each element and
+/// skip any where it returns false.
+///
+/// \code
+///   int A[] = { 1, 2, 3, 4 };
+///   auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
+///   // R contains { 1, 3 }.
+/// \endcode
+template <typename WrappedIteratorT, typename PredicateT>
+class filter_iterator
+    : public iterator_adaptor_base<
+          filter_iterator<WrappedIteratorT, PredicateT>, WrappedIteratorT,
+          typename std::common_type<
+              std::forward_iterator_tag,
+              typename std::iterator_traits<
+                  WrappedIteratorT>::iterator_category>::type> {
+  using BaseT = iterator_adaptor_base<
+      filter_iterator<WrappedIteratorT, PredicateT>, WrappedIteratorT,
+      typename std::common_type<
+          std::forward_iterator_tag,
+          typename std::iterator_traits<WrappedIteratorT>::iterator_category>::
+          type>;
+
+  struct PayloadType {
+    WrappedIteratorT End;
+    PredicateT Pred;
+  };
+
+  Optional<PayloadType> Payload;
+
+  void findNextValid() {
+    assert(Payload && "Payload should be engaged when findNextValid is called");
+    while (this->I != Payload->End && !Payload->Pred(*this->I))
+      BaseT::operator++();
+  }
+
+  // Construct the begin iterator. The begin iterator requires to know where end
+  // is, so that it can properly stop when it hits end.
+  filter_iterator(WrappedIteratorT Begin, WrappedIteratorT End, PredicateT Pred)
+      : BaseT(std::move(Begin)),
+        Payload(PayloadType{std::move(End), std::move(Pred)}) {
+    findNextValid();
+  }
+
+  // Construct the end iterator. It's not incrementable, so Payload doesn't
+  // have to be engaged.
+  filter_iterator(WrappedIteratorT End) : BaseT(End) {}
+
+public:
+  using BaseT::operator++;
+
+  filter_iterator &operator++() {
+    BaseT::operator++();
+    findNextValid();
+    return *this;
+  }
+
+  template <typename RT, typename PT>
+  friend iterator_range<filter_iterator<detail::IterOfRange<RT>, PT>>
+  make_filter_range(RT &&, PT);
+};
+
+/// Convenience function that takes a range of elements and a predicate,
+/// and return a new filter_iterator range.
+///
+/// FIXME: Currently if RangeT && is an rvalue reference to a temporary, the
+/// lifetime of that temporary is not kept by the returned range object, and the
+/// temporary is going to be dropped on the floor after the make_iterator_range
+/// full expression that contains this function call.
+template <typename RangeT, typename PredicateT>
+iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
+make_filter_range(RangeT &&Range, PredicateT Pred) {
+  using FilterIteratorT =
+      filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
+  return make_range(FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
+                                    std::end(std::forward<RangeT>(Range)),
+                                    std::move(Pred)),
+                    FilterIteratorT(std::end(std::forward<RangeT>(Range))));
+}
+
+// forward declarations required by zip_shortest/zip_first
+template <typename R, typename UnaryPredicate>
+bool all_of(R &&range, UnaryPredicate P);
+
+template <size_t... I> struct index_sequence;
+
+template <class... Ts> struct index_sequence_for;
+
+namespace detail {
+
+using std::declval;
+
+// We have to alias this since inlining the actual type at the usage site
+// in the parameter list of iterator_facade_base<> below ICEs MSVC 2017.
+template<typename... Iters> struct ZipTupleType {
+  using type = std::tuple<decltype(*declval<Iters>())...>;
+};
+
+template <typename ZipType, typename... Iters>
+using zip_traits = iterator_facade_base<
+    ZipType, typename std::common_type<std::bidirectional_iterator_tag,
+                                       typename std::iterator_traits<
+                                           Iters>::iterator_category...>::type,
+    // ^ TODO: Implement random access methods.
+    typename ZipTupleType<Iters...>::type,
+    typename std::iterator_traits<typename std::tuple_element<
+        0, std::tuple<Iters...>>::type>::difference_type,
+    // ^ FIXME: This follows boost::make_zip_iterator's assumption that all
+    // inner iterators have the same difference_type. It would fail if, for
+    // instance, the second field's difference_type were non-numeric while the
+    // first is.
+    typename ZipTupleType<Iters...>::type *,
+    typename ZipTupleType<Iters...>::type>;
+
+template <typename ZipType, typename... Iters>
+struct zip_common : public zip_traits<ZipType, Iters...> {
+  using Base = zip_traits<ZipType, Iters...>;
+  using value_type = typename Base::value_type;
+
+  std::tuple<Iters...> iterators;
+
+protected:
+  template <size_t... Ns> value_type deref(index_sequence<Ns...>) const {
+    return value_type(*std::get<Ns>(iterators)...);
+  }
+
+  template <size_t... Ns>
+  decltype(iterators) tup_inc(index_sequence<Ns...>) const {
+    return std::tuple<Iters...>(std::next(std::get<Ns>(iterators))...);
+  }
+
+  template <size_t... Ns>
+  decltype(iterators) tup_dec(index_sequence<Ns...>) const {
+    return std::tuple<Iters...>(std::prev(std::get<Ns>(iterators))...);
+  }
+
+public:
+  zip_common(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {}
+
+  value_type operator*() { return deref(index_sequence_for<Iters...>{}); }
+
+  const value_type operator*() const {
+    return deref(index_sequence_for<Iters...>{});
+  }
+
+  ZipType &operator++() {
+    iterators = tup_inc(index_sequence_for<Iters...>{});
+    return *reinterpret_cast<ZipType *>(this);
+  }
+
+  ZipType &operator--() {
+    static_assert(Base::IsBidirectional,
+                  "All inner iterators must be at least bidirectional.");
+    iterators = tup_dec(index_sequence_for<Iters...>{});
+    return *reinterpret_cast<ZipType *>(this);
+  }
+};
+
+template <typename... Iters>
+struct zip_first : public zip_common<zip_first<Iters...>, Iters...> {
+  using Base = zip_common<zip_first<Iters...>, Iters...>;
+
+  bool operator==(const zip_first<Iters...> &other) const {
+    return std::get<0>(this->iterators) == std::get<0>(other.iterators);
+  }
+
+  zip_first(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
+};
+
+template <typename... Iters>
+class zip_shortest : public zip_common<zip_shortest<Iters...>, Iters...> {
+  template <size_t... Ns>
+  bool test(const zip_shortest<Iters...> &other, index_sequence<Ns...>) const {
+    return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
+                                              std::get<Ns>(other.iterators)...},
+                  identity<bool>{});
+  }
+
+public:
+  using Base = zip_common<zip_shortest<Iters...>, Iters...>;
+
+  zip_shortest(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
+
+  bool operator==(const zip_shortest<Iters...> &other) const {
+    return !test(other, index_sequence_for<Iters...>{});
+  }
+};
+
+template <template <typename...> class ItType, typename... Args> class zippy {
+public:
+  using iterator = ItType<decltype(std::begin(std::declval<Args>()))...>;
+  using iterator_category = typename iterator::iterator_category;
+  using value_type = typename iterator::value_type;
+  using difference_type = typename iterator::difference_type;
+  using pointer = typename iterator::pointer;
+  using reference = typename iterator::reference;
+
+private:
+  std::tuple<Args...> ts;
+
+  template <size_t... Ns> iterator begin_impl(index_sequence<Ns...>) const {
+    return iterator(std::begin(std::get<Ns>(ts))...);
+  }
+  template <size_t... Ns> iterator end_impl(index_sequence<Ns...>) const {
+    return iterator(std::end(std::get<Ns>(ts))...);
+  }
+
+public:
+  zippy(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
+
+  iterator begin() const { return begin_impl(index_sequence_for<Args...>{}); }
+  iterator end() const { return end_impl(index_sequence_for<Args...>{}); }
+};
+
+} // end namespace detail
+
+/// Zip iterator for two or more iterable types.
+template <typename T, typename U, typename... Args>
+detail::zippy<detail::zip_shortest, T, U, Args...> zip(T &&t, U &&u,
+                                                       Args &&... args) {
+  return detail::zippy<detail::zip_shortest, T, U, Args...>(
+      std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
+}
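+
+// A minimal usage sketch: the ranges are iterated in lockstep, stopping at
+// the end of the shortest one.
+//
+//   std::vector<int> A = {1, 2, 3};
+//   std::vector<char> B = {'x', 'y'};
+//   for (auto &&T : zip(A, B))        // visits (1, 'x') and (2, 'y') only
+//     std::get<0>(T) += std::get<1>(T);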
+
+/// Zip iterator that, for the sake of efficiency, assumes the first iteratee
+/// to be the shortest; only the first pair of iterators is compared when
+/// testing for equality with the end.
+template <typename T, typename U, typename... Args>
+detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u,
+                                                          Args &&... args) {
+  return detail::zippy<detail::zip_first, T, U, Args...>(
+      std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
+}
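+
+// Sketch: safe only when the first range is no longer than the rest.
+//
+//   std::vector<int> Idx = {0, 1};
+//   std::vector<std::string> Names = {"a", "b", "c"};
+//   for (auto &&T : zip_first(Idx, Names))
+//     (void)T; // visits (0, "a") and (1, "b"); Names[2] is never touched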
+
+/// Iterator wrapper that concatenates sequences together.
+///
+/// This can concatenate different iterators, even with different types, into
+/// a single iterator provided that all the concatenated iterators expose
+/// `reference` and `pointer` types that can be converted to `ValueT &` and
+/// `ValueT *` respectively. It doesn't support more
+/// interesting/customized pointer or reference types.
+///
+/// Currently this only supports forward or higher iterator categories as
+/// inputs and always exposes a forward iterator interface.
+template <typename ValueT, typename... IterTs>
+class concat_iterator
+    : public iterator_facade_base<concat_iterator<ValueT, IterTs...>,
+                                  std::forward_iterator_tag, ValueT> {
+  using BaseT = typename concat_iterator::iterator_facade_base;
+
+  /// We store both the current and end iterators for each concatenated
+  /// sequence in a tuple of pairs.
+  ///
+  /// Note that something like iterator_range seems nice at first here, but the
+  /// range properties are of little benefit and end up getting in the way
+  /// because we need to do mutation on the current iterators.
+  std::tuple<std::pair<IterTs, IterTs>...> IterPairs;
+
+  /// Attempts to increment a specific iterator.
+  ///
+  /// Returns true if it was able to increment the iterator. Returns false if
+  /// the iterator is already at the end iterator.
+  template <size_t Index> bool incrementHelper() {
+    auto &IterPair = std::get<Index>(IterPairs);
+    if (IterPair.first == IterPair.second)
+      return false;
+
+    ++IterPair.first;
+    return true;
+  }
+
+  /// Increments the first non-end iterator.
+  ///
+  /// It is an error to call this with all iterators at the end.
+  template <size_t... Ns> void increment(index_sequence<Ns...>) {
+    // Build a sequence of functions to increment each iterator if possible.
+    bool (concat_iterator::*IncrementHelperFns[])() = {
+        &concat_iterator::incrementHelper<Ns>...};
+
+    // Loop over them, and stop as soon as we succeed at incrementing one.
+    for (auto &IncrementHelperFn : IncrementHelperFns)
+      if ((this->*IncrementHelperFn)())
+        return;
+
+    llvm_unreachable("Attempted to increment an end concat iterator!");
+  }
+
+  /// Returns null if the specified iterator is at the end. Otherwise,
+  /// dereferences the iterator and returns the address of the resulting
+  /// reference.
+  template <size_t Index> ValueT *getHelper() const {
+    auto &IterPair = std::get<Index>(IterPairs);
+    if (IterPair.first == IterPair.second)
+      return nullptr;
+
+    return &*IterPair.first;
+  }
+
+  /// Finds the first non-end iterator, dereferences, and returns the resulting
+  /// reference.
+  ///
+  /// It is an error to call this with all iterators at the end.
+  template <size_t... Ns> ValueT &get(index_sequence<Ns...>) const {
+    // Build a sequence of functions to get from iterator if possible.
+    ValueT *(concat_iterator::*GetHelperFns[])() const = {
+        &concat_iterator::getHelper<Ns>...};
+
+    // Loop over them, and return the first result we find.
+    for (auto &GetHelperFn : GetHelperFns)
+      if (ValueT *P = (this->*GetHelperFn)())
+        return *P;
+
+    llvm_unreachable("Attempted to get a pointer from an end concat iterator!");
+  }
+
+public:
+  /// Constructs an iterator from a sequence of ranges.
+  ///
+  /// We need the full range to know how to switch between each of the
+  /// iterators.
+  template <typename... RangeTs>
+  explicit concat_iterator(RangeTs &&... Ranges)
+      : IterPairs({std::begin(Ranges), std::end(Ranges)}...) {}
+
+  using BaseT::operator++;
+
+  concat_iterator &operator++() {
+    increment(index_sequence_for<IterTs...>());
+    return *this;
+  }
+
+  ValueT &operator*() const { return get(index_sequence_for<IterTs...>()); }
+
+  bool operator==(const concat_iterator &RHS) const {
+    return IterPairs == RHS.IterPairs;
+  }
+};
+
+namespace detail {
+
+/// Helper to store a sequence of ranges being concatenated and access them.
+///
+/// This is designed to facilitate providing actual storage when temporaries
+/// are passed into the constructor such that we can use it as part of range
+/// based for loops.
+template <typename ValueT, typename... RangeTs> class concat_range {
+public:
+  using iterator =
+      concat_iterator<ValueT,
+                      decltype(std::begin(std::declval<RangeTs &>()))...>;
+
+private:
+  std::tuple<RangeTs...> Ranges;
+
+  template <size_t... Ns> iterator begin_impl(index_sequence<Ns...>) {
+    return iterator(std::get<Ns>(Ranges)...);
+  }
+  template <size_t... Ns> iterator end_impl(index_sequence<Ns...>) {
+    return iterator(make_range(std::end(std::get<Ns>(Ranges)),
+                               std::end(std::get<Ns>(Ranges)))...);
+  }
+
+public:
+  concat_range(RangeTs &&... Ranges)
+      : Ranges(std::forward<RangeTs>(Ranges)...) {}
+
+  iterator begin() { return begin_impl(index_sequence_for<RangeTs...>{}); }
+  iterator end() { return end_impl(index_sequence_for<RangeTs...>{}); }
+};
+
+} // end namespace detail
+
+/// Concatenated range across two or more ranges.
+///
+/// The desired value type must be explicitly specified.
+template <typename ValueT, typename... RangeTs>
+detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
+  static_assert(sizeof...(RangeTs) > 1,
+                "Need more than one range to concatenate!");
+  return detail::concat_range<ValueT, RangeTs...>(
+      std::forward<RangeTs>(Ranges)...);
+}
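+
+// A minimal sketch: the element type is spelled out explicitly, and the
+// ranges may use different container (and iterator) types.
+//
+//   std::vector<int> A = {1, 2};
+//   std::list<int> B = {3, 4};
+//   for (int &V : concat<int>(A, B)) // visits 1, 2, 3, 4 in order
+//     V *= 2;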
+
+//===----------------------------------------------------------------------===//
+//     Extra additions to <utility>
+//===----------------------------------------------------------------------===//
+
+/// \brief Function object to check whether the first component of a std::pair
+/// compares less than the first component of another std::pair.
+struct less_first {
+  template <typename T> bool operator()(const T &lhs, const T &rhs) const {
+    return lhs.first < rhs.first;
+  }
+};
+
+/// \brief Function object to check whether the second component of a std::pair
+/// compares less than the second component of another std::pair.
+struct less_second {
+  template <typename T> bool operator()(const T &lhs, const T &rhs) const {
+    return lhs.second < rhs.second;
+  }
+};
+
+// A subset of N3658. More stuff can be added as needed.
+
+/// \brief Represents a compile-time sequence of integers.
+template <class T, T... I> struct integer_sequence {
+  using value_type = T;
+
+  static constexpr size_t size() { return sizeof...(I); }
+};
+
+/// \brief Alias for the common case of a sequence of size_ts.
+template <size_t... I>
+struct index_sequence : integer_sequence<std::size_t, I...> {};
+
+template <std::size_t N, std::size_t... I>
+struct build_index_impl : build_index_impl<N - 1, N - 1, I...> {};
+template <std::size_t... I>
+struct build_index_impl<0, I...> : index_sequence<I...> {};
+
+/// \brief Creates a compile-time integer sequence for a parameter pack.
+template <class... Ts>
+struct index_sequence_for : build_index_impl<sizeof...(Ts)> {};
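+
+// For example, index_sequence_for<int, char, float> derives from
+// index_sequence<0, 1, 2>. A minimal sketch of consuming such an index pack:
+//
+//   template <typename... Ts, size_t... Ns>
+//   void incrementAll(std::tuple<Ts...> &T, index_sequence<Ns...>) {
+//     int Unused[] = {(++std::get<Ns>(T), 0)...}; // one ++ per element
+//     (void)Unused;
+//   }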
+
+/// Utility type to build an inheritance chain that makes it easy to rank
+/// overload candidates.
+template <int N> struct rank : rank<N - 1> {};
+template <> struct rank<0> {};
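+
+// Sketch of the intended use: a call made with rank<1>{} prefers an overload
+// taking rank<1> and falls back to rank<0> via derived-to-base conversion.
+//
+//   template <typename T>
+//   auto hasResize(rank<1>) -> decltype(std::declval<T &>().resize(0),
+//                                       std::true_type{});
+//   template <typename T> std::false_type hasResize(rank<0>);
+//   // decltype(hasResize<std::vector<int>>(rank<1>{})) is std::true_type.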
+
+/// \brief traits class for checking whether type T is any of the given
+/// types in the variadic list.
+template <typename T, typename... Ts> struct is_one_of {
+  static const bool value = false;
+};
+
+template <typename T, typename U, typename... Ts>
+struct is_one_of<T, U, Ts...> {
+  static const bool value =
+      std::is_same<T, U>::value || is_one_of<T, Ts...>::value;
+};
+
+/// \brief traits class for checking whether type T is a base class of all
+/// the given types in the variadic list.
+template <typename T, typename... Ts> struct are_base_of {
+  static const bool value = true;
+};
+
+template <typename T, typename U, typename... Ts>
+struct are_base_of<T, U, Ts...> {
+  static const bool value =
+      std::is_base_of<T, U>::value && are_base_of<T, Ts...>::value;
+};
+
+//===----------------------------------------------------------------------===//
+//     Extra additions for arrays
+//===----------------------------------------------------------------------===//
+
+/// Find the length of an array.
+template <class T, std::size_t N>
+constexpr inline size_t array_lengthof(T (&)[N]) {
+  return N;
+}
+
+/// Adapt std::less<T> for array_pod_sort.
+template<typename T>
+inline int array_pod_sort_comparator(const void *P1, const void *P2) {
+  if (std::less<T>()(*reinterpret_cast<const T*>(P1),
+                     *reinterpret_cast<const T*>(P2)))
+    return -1;
+  if (std::less<T>()(*reinterpret_cast<const T*>(P2),
+                     *reinterpret_cast<const T*>(P1)))
+    return 1;
+  return 0;
+}
+
+/// get_array_pod_sort_comparator - This is an internal helper function used to
+/// get type deduction of T right.
+template<typename T>
+inline int (*get_array_pod_sort_comparator(const T &))
+             (const void*, const void*) {
+  return array_pod_sort_comparator<T>;
+}
+
+/// array_pod_sort - This sorts an array with the specified start and end
+/// extent.  This is just like std::sort, except that it calls qsort instead of
+/// using an inlined template.  qsort is slightly slower than std::sort, but
+/// most sorts are not performance critical in LLVM and std::sort has to be
+/// template instantiated for each type, leading to significant measured code
+/// bloat.  This function should generally be used instead of std::sort where
+/// possible.
+///
+/// This function assumes that you have simple POD-like types that can be
+/// compared with std::less and can be moved with memcpy.  If this isn't true,
+/// you should use std::sort.
+///
+/// NOTE: If qsort_r were portable, we could allow a custom comparator and
+/// default to std::less.
+template<class IteratorTy>
+inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
+  // Don't inefficiently call qsort with one element or trigger undefined
+  // behavior with an empty sequence.
+  auto NElts = End - Start;
+  if (NElts <= 1) return;
+#ifdef EXPENSIVE_CHECKS
+  std::mt19937 Generator(std::random_device{}());
+  std::shuffle(Start, End, Generator);
+#endif
+  qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));
+}
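+
+// A minimal usage sketch (POD element type, default std::less ordering):
+//
+//   int Values[] = {3, 1, 2};
+//   array_pod_sort(std::begin(Values), std::end(Values)); // 1, 2, 3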
+
+template <class IteratorTy>
+inline void array_pod_sort(
+    IteratorTy Start, IteratorTy End,
+    int (*Compare)(
+        const typename std::iterator_traits<IteratorTy>::value_type *,
+        const typename std::iterator_traits<IteratorTy>::value_type *)) {
+  // Don't inefficiently call qsort with one element or trigger undefined
+  // behavior with an empty sequence.
+  auto NElts = End - Start;
+  if (NElts <= 1) return;
+#ifdef EXPENSIVE_CHECKS
+  std::mt19937 Generator(std::random_device{}());
+  std::shuffle(Start, End, Generator);
+#endif
+  qsort(&*Start, NElts, sizeof(*Start),
+        reinterpret_cast<int (*)(const void *, const void *)>(Compare));
+}
+
+// Provide wrappers to std::sort which shuffle the elements before sorting
+// to help uncover non-deterministic behavior (PR35135).
+template <typename IteratorTy>
+inline void sort(IteratorTy Start, IteratorTy End) {
+#ifdef EXPENSIVE_CHECKS
+  std::mt19937 Generator(std::random_device{}());
+  std::shuffle(Start, End, Generator);
+#endif
+  std::sort(Start, End);
+}
+
+template <typename IteratorTy, typename Compare>
+inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
+#ifdef EXPENSIVE_CHECKS
+  std::mt19937 Generator(std::random_device{}());
+  std::shuffle(Start, End, Generator);
+#endif
+  std::sort(Start, End, Comp);
+}
+
+//===----------------------------------------------------------------------===//
+//     Extra additions to <algorithm>
+//===----------------------------------------------------------------------===//
+
+/// For a container of pointers, deletes the pointers and then clears the
+/// container.
+template<typename Container>
+void DeleteContainerPointers(Container &C) {
+  for (auto V : C)
+    delete V;
+  C.clear();
+}
+
+/// In a container of pairs (usually a map) whose second element is a pointer,
+/// deletes the second elements and then clears the container.
+template<typename Container>
+void DeleteContainerSeconds(Container &C) {
+  for (auto &V : C)
+    delete V.second;
+  C.clear();
+}
+
+/// Provide wrappers to std::for_each which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+UnaryPredicate for_each(R &&Range, UnaryPredicate P) {
+  return std::for_each(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::all_of which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+bool all_of(R &&Range, UnaryPredicate P) {
+  return std::all_of(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::any_of which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+bool any_of(R &&Range, UnaryPredicate P) {
+  return std::any_of(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::none_of which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+bool none_of(R &&Range, UnaryPredicate P) {
+  return std::none_of(adl_begin(Range), adl_end(Range), P);
+}
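+
+// A minimal sketch of the range-based style these wrappers enable:
+//
+//   std::vector<int> V = {1, 2, 3};
+//   bool AllPos = all_of(V, [](int X) { return X > 0; });  // true
+//   bool HasTwo = any_of(V, [](int X) { return X == 2; }); // true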
+
+/// Provide wrappers to std::find which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename T>
+auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range)) {
+  return std::find(adl_begin(Range), adl_end(Range), Val);
+}
+
+/// Provide wrappers to std::find_if which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
+  return std::find_if(adl_begin(Range), adl_end(Range), P);
+}
+
+template <typename R, typename UnaryPredicate>
+auto find_if_not(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
+  return std::find_if_not(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::remove_if which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+auto remove_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
+  return std::remove_if(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::copy_if which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename OutputIt, typename UnaryPredicate>
+OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P) {
+  return std::copy_if(adl_begin(Range), adl_end(Range), Out, P);
+}
+
+template <typename R, typename OutputIt>
+OutputIt copy(R &&Range, OutputIt Out) {
+  return std::copy(adl_begin(Range), adl_end(Range), Out);
+}
+
+/// Wrapper function around std::find to detect if an element exists
+/// in a container.
+template <typename R, typename E>
+bool is_contained(R &&Range, const E &Element) {
+  return std::find(adl_begin(Range), adl_end(Range), Element) != adl_end(Range);
+}
+
+/// Wrapper function around std::count to count the number of times an element
+/// \p Element occurs in the given range \p Range.
+template <typename R, typename E>
+auto count(R &&Range, const E &Element) ->
+    typename std::iterator_traits<decltype(adl_begin(Range))>::difference_type {
+  return std::count(adl_begin(Range), adl_end(Range), Element);
+}
+
+/// Wrapper function around std::count_if to count the number of times an
+/// element satisfying a given predicate occurs in a range.
+template <typename R, typename UnaryPredicate>
+auto count_if(R &&Range, UnaryPredicate P) ->
+    typename std::iterator_traits<decltype(adl_begin(Range))>::difference_type {
+  return std::count_if(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Wrapper function around std::transform to apply a function to a range and
+/// store the result elsewhere.
+template <typename R, typename OutputIt, typename UnaryPredicate>
+OutputIt transform(R &&Range, OutputIt d_first, UnaryPredicate P) {
+  return std::transform(adl_begin(Range), adl_end(Range), d_first, P);
+}
+
+/// Provide wrappers to std::partition which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+auto partition(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
+  return std::partition(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::lower_bound which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename ForwardIt>
+auto lower_bound(R &&Range, ForwardIt I) -> decltype(adl_begin(Range)) {
+  return std::lower_bound(adl_begin(Range), adl_end(Range), I);
+}
+
+/// \brief Given a range of type R, iterate the entire range and return a
+/// SmallVector containing the elements of the range.  This is useful, for
+/// example, when you want to iterate a range and then sort the results.
+template <unsigned Size, typename R>
+SmallVector<typename std::remove_const<detail::ValueOfRange<R>>::type, Size>
+to_vector(R &&Range) {
+  return {adl_begin(Range), adl_end(Range)};
+}
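+
+// Sketch: materialize a range so its elements can be sorted afterwards.
+//
+//   std::list<int> L = {3, 1, 2};
+//   auto Vec = to_vector<4>(L); // SmallVector<int, 4> holding 3, 1, 2
+//   llvm::sort(Vec.begin(), Vec.end());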
+
+/// Provide a container algorithm similar to C++ Library Fundamentals v2's
+/// `erase_if` which is equivalent to:
+///
+///   C.erase(remove_if(C, pred), C.end());
+///
+/// This version works for any container with an erase method call accepting
+/// two iterators.
+template <typename Container, typename UnaryPredicate>
+void erase_if(Container &C, UnaryPredicate P) {
+  C.erase(remove_if(C, P), C.end());
+}
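+
+// Sketch:
+//
+//   std::vector<int> V = {1, 2, 3, 4};
+//   erase_if(V, [](int X) { return (X & 1) == 0; }); // V == {1, 3}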
+
+//===----------------------------------------------------------------------===//
+//     Extra additions to <memory>
+//===----------------------------------------------------------------------===//
+
+// Implement make_unique according to N3656.
+
+/// \brief Constructs a `new T()` with the given args and returns a
+///        `unique_ptr<T>` which owns the object.
+///
+/// Example:
+///
+///     auto p = make_unique<int>();
+///     auto p = make_unique<std::tuple<int, int>>(0, 1);
+template <class T, class... Args>
+typename std::enable_if<!std::is_array<T>::value, std::unique_ptr<T>>::type
+make_unique(Args &&... args) {
+  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+/// \brief Constructs a `new T[n]` and returns a `unique_ptr<T[]>` which owns
+///        the array.
+///
+/// \param n size of the new array.
+///
+/// Example:
+///
+///     auto p = make_unique<int[]>(2); // value-initializes the array with 0's.
+template <class T>
+typename std::enable_if<std::is_array<T>::value && std::extent<T>::value == 0,
+                        std::unique_ptr<T>>::type
+make_unique(size_t n) {
+  return std::unique_ptr<T>(new typename std::remove_extent<T>::type[n]());
+}
+
+/// This function isn't used and is only here to provide better compile errors.
+template <class T, class... Args>
+typename std::enable_if<std::extent<T>::value != 0>::type
+make_unique(Args &&...) = delete;
+
+struct FreeDeleter {
+  void operator()(void* v) {
+    ::free(v);
+  }
+};
+
+template<typename First, typename Second>
+struct pair_hash {
+  size_t operator()(const std::pair<First, Second> &P) const {
+    return std::hash<First>()(P.first) * 31 + std::hash<Second>()(P.second);
+  }
+};
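+
+// Sketch of the intended use, supplying the hash to an unordered container:
+//
+//   std::unordered_map<std::pair<int, int>, const char *,
+//                      pair_hash<int, int>> Table;
+//   Table[{1, 2}] = "found";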
+
+/// A functor like C++14's std::less<void> in its absence.
+struct less {
+  template <typename A, typename B> bool operator()(A &&a, B &&b) const {
+    return std::forward<A>(a) < std::forward<B>(b);
+  }
+};
+
+/// A functor like C++14's std::equal_to<void> in its absence.
+struct equal {
+  template <typename A, typename B> bool operator()(A &&a, B &&b) const {
+    return std::forward<A>(a) == std::forward<B>(b);
+  }
+};
+
+/// Binary functor that adapts to any other binary functor after dereferencing
+/// operands.
+template <typename T> struct deref {
+  T func;
+
+  // Could be further improved to cope with non-derivable functors and
+  // non-binary functors (should be a variadic template member function
+  // operator()).
+  template <typename A, typename B>
+  auto operator()(A &lhs, B &rhs) const -> decltype(func(*lhs, *rhs)) {
+    assert(lhs);
+    assert(rhs);
+    return func(*lhs, *rhs);
+  }
+};
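+
+// Sketch: sort owning pointers by comparing their pointees.
+//
+//   std::vector<std::unique_ptr<int>> V;
+//   llvm::sort(V.begin(), V.end(), deref<less>());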
+
+namespace detail {
+
+template <typename R> class enumerator_iter;
+
+template <typename R> struct result_pair {
+  friend class enumerator_iter<R>;
+
+  result_pair() = default;
+  result_pair(std::size_t Index, IterOfRange<R> Iter)
+      : Index(Index), Iter(Iter) {}
+
+  result_pair<R> &operator=(const result_pair<R> &Other) {
+    Index = Other.Index;
+    Iter = Other.Iter;
+    return *this;
+  }
+
+  std::size_t index() const { return Index; }
+  const ValueOfRange<R> &value() const { return *Iter; }
+  ValueOfRange<R> &value() { return *Iter; }
+
+private:
+  std::size_t Index = std::numeric_limits<std::size_t>::max();
+  IterOfRange<R> Iter;
+};
+
+template <typename R>
+class enumerator_iter
+    : public iterator_facade_base<
+          enumerator_iter<R>, std::forward_iterator_tag, result_pair<R>,
+          typename std::iterator_traits<IterOfRange<R>>::difference_type,
+          typename std::iterator_traits<IterOfRange<R>>::pointer,
+          typename std::iterator_traits<IterOfRange<R>>::reference> {
+  using result_type = result_pair<R>;
+
+public:
+  explicit enumerator_iter(IterOfRange<R> EndIter)
+      : Result(std::numeric_limits<size_t>::max(), EndIter) {}
+
+  enumerator_iter(std::size_t Index, IterOfRange<R> Iter)
+      : Result(Index, Iter) {}
+
+  result_type &operator*() { return Result; }
+  const result_type &operator*() const { return Result; }
+
+  enumerator_iter<R> &operator++() {
+    assert(Result.Index != std::numeric_limits<size_t>::max());
+    ++Result.Iter;
+    ++Result.Index;
+    return *this;
+  }
+
+  bool operator==(const enumerator_iter<R> &RHS) const {
+    // Don't compare indices here, only iterators.  It's possible for an end
+    // iterator to have different indices depending on whether it was created
+    // by calling std::end() versus incrementing a valid iterator.
+    return Result.Iter == RHS.Result.Iter;
+  }
+
+  enumerator_iter<R> &operator=(const enumerator_iter<R> &Other) {
+    Result = Other.Result;
+    return *this;
+  }
+
+private:
+  result_type Result;
+};
+
+template <typename R> class enumerator {
+public:
+  explicit enumerator(R &&Range) : TheRange(std::forward<R>(Range)) {}
+
+  enumerator_iter<R> begin() {
+    return enumerator_iter<R>(0, std::begin(TheRange));
+  }
+
+  enumerator_iter<R> end() {
+    return enumerator_iter<R>(std::end(TheRange));
+  }
+
+private:
+  R TheRange;
+};
+
+} // end namespace detail
+
+/// Given an input range, returns a new range whose values are pairs (A, B)
+/// such that A is the 0-based index of the item in the sequence, and B is
+/// the value from the original sequence.  Example:
+///
+/// std::vector<char> Items = {'A', 'B', 'C', 'D'};
+/// for (auto X : enumerate(Items)) {
+///   printf("Item %d - %c\n", X.index(), X.value());
+/// }
+///
+/// Output:
+///   Item 0 - A
+///   Item 1 - B
+///   Item 2 - C
+///   Item 3 - D
+///
+template <typename R> detail::enumerator<R> enumerate(R &&TheRange) {
+  return detail::enumerator<R>(std::forward<R>(TheRange));
+}
+
+namespace detail {
+
+template <typename F, typename Tuple, std::size_t... I>
+auto apply_tuple_impl(F &&f, Tuple &&t, index_sequence<I...>)
+    -> decltype(std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...)) {
+  return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...);
+}
+
+} // end namespace detail
+
+/// Given an input tuple (a1, a2, ..., an), pass the arguments of the
+/// tuple variadically to f as if by calling f(a1, a2, ..., an) and
+/// return the result.
+template <typename F, typename Tuple>
+auto apply_tuple(F &&f, Tuple &&t) -> decltype(detail::apply_tuple_impl(
+    std::forward<F>(f), std::forward<Tuple>(t),
+    build_index_impl<
+        std::tuple_size<typename std::decay<Tuple>::type>::value>{})) {
+  using Indices = build_index_impl<
+      std::tuple_size<typename std::decay<Tuple>::type>::value>;
+
+  return detail::apply_tuple_impl(std::forward<F>(f), std::forward<Tuple>(t),
+                                  Indices{});
+}
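+
+// A minimal sketch:
+//
+//   auto Sum = [](int A, int B) { return A + B; };
+//   int R = apply_tuple(Sum, std::make_tuple(1, 2)); // R == 3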
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STLEXTRAS_H
diff --git a/linux-x64/clang/include/llvm/ADT/ScopeExit.h b/linux-x64/clang/include/llvm/ADT/ScopeExit.h
new file mode 100644
index 0000000..bd13755
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ScopeExit.h
@@ -0,0 +1,66 @@
+//===- llvm/ADT/ScopeExit.h - Execute code at scope exit --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the make_scope_exit function, which executes user-defined
+// cleanup logic at scope exit.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SCOPE_EXIT_H
+#define LLVM_ADT_SCOPE_EXIT_H
+
+#include "llvm/Support/Compiler.h"
+
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+namespace detail {
+
+template <typename Callable> class scope_exit {
+  Callable ExitFunction;
+  bool Engaged = true; // False once moved-from or release()d.
+
+public:
+  template <typename Fp>
+  explicit scope_exit(Fp &&F) : ExitFunction(std::forward<Fp>(F)) {}
+
+  scope_exit(scope_exit &&Rhs)
+      : ExitFunction(std::move(Rhs.ExitFunction)), Engaged(Rhs.Engaged) {
+    Rhs.release();
+  }
+  scope_exit(const scope_exit &) = delete;
+  scope_exit &operator=(scope_exit &&) = delete;
+  scope_exit &operator=(const scope_exit &) = delete;
+
+  void release() { Engaged = false; }
+
+  ~scope_exit() {
+    if (Engaged)
+      ExitFunction();
+  }
+};
+
+} // end namespace detail
+
+// Keeps the callable object that is passed in, and executes it at the
+// destruction of the returned object (usually at the exit of the scope in
+// which the returned object is kept).
+//
+// Interface is specified by p0052r2.
+template <typename Callable>
+LLVM_NODISCARD detail::scope_exit<typename std::decay<Callable>::type>
+make_scope_exit(Callable &&F) {
+  return detail::scope_exit<typename std::decay<Callable>::type>(
+      std::forward<Callable>(F));
+}
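+
+// A minimal usage sketch: the cleanup runs on every exit path unless
+// release() is called first.
+//
+//   FILE *F = fopen("data.txt", "r");
+//   auto Closer = make_scope_exit([&] { if (F) fclose(F); });
+//   // ... early returns are safe; F is closed when Closer is destroyed.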
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/ScopedHashTable.h b/linux-x64/clang/include/llvm/ADT/ScopedHashTable.h
new file mode 100644
index 0000000..22b0c1b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ScopedHashTable.h
@@ -0,0 +1,264 @@
+//===- ScopedHashTable.h - A simple scoped hash table -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an efficient scoped hash table, which is useful for
+// things like dominator-based optimizations.  This allows clients to do things
+// like this:
+//
+//  ScopedHashTable<int, int> HT;
+//  {
+//    ScopedHashTableScope<int, int> Scope1(HT);
+//    HT.insert(0, 0);
+//    HT.insert(1, 1);
+//    {
+//      ScopedHashTableScope<int, int> Scope2(HT);
+//      HT.insert(0, 42);
+//    }
+//  }
+//
+// Looking up the value for "0" in the Scope2 block will return 42.  Looking
+// up the value for 0 before 42 is inserted or after Scope2 is popped will
+// return 0.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SCOPEDHASHTABLE_H
+#define LLVM_ADT_SCOPEDHASHTABLE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <new>
+
+namespace llvm {
+
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
+          typename AllocatorTy = MallocAllocator>
+class ScopedHashTable;
+
+template <typename K, typename V>
+class ScopedHashTableVal {
+  ScopedHashTableVal *NextInScope;
+  ScopedHashTableVal *NextForKey;
+  K Key;
+  V Val;
+
+  ScopedHashTableVal(const K &key, const V &val) : Key(key), Val(val) {}
+
+public:
+  const K &getKey() const { return Key; }
+  const V &getValue() const { return Val; }
+  V &getValue() { return Val; }
+
+  ScopedHashTableVal *getNextForKey() { return NextForKey; }
+  const ScopedHashTableVal *getNextForKey() const { return NextForKey; }
+  ScopedHashTableVal *getNextInScope() { return NextInScope; }
+
+  template <typename AllocatorTy>
+  static ScopedHashTableVal *Create(ScopedHashTableVal *nextInScope,
+                                    ScopedHashTableVal *nextForKey,
+                                    const K &key, const V &val,
+                                    AllocatorTy &Allocator) {
+    ScopedHashTableVal *New = Allocator.template Allocate<ScopedHashTableVal>();
+    // Set up the value.
+    new (New) ScopedHashTableVal(key, val);
+    New->NextInScope = nextInScope;
+    New->NextForKey = nextForKey;
+    return New;
+  }
+
+  template <typename AllocatorTy> void Destroy(AllocatorTy &Allocator) {
+    // Free memory referenced by the item.
+    this->~ScopedHashTableVal();
+    Allocator.Deallocate(this);
+  }
+};
+
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
+          typename AllocatorTy = MallocAllocator>
+class ScopedHashTableScope {
+  /// HT - The hashtable that we are active for.
+  ScopedHashTable<K, V, KInfo, AllocatorTy> &HT;
+
+  /// PrevScope - This is the scope that we are shadowing in HT.
+  ScopedHashTableScope *PrevScope;
+
+  /// LastValInScope - This is the last value that was inserted for this scope
+  /// or null if none have been inserted yet.
+  ScopedHashTableVal<K, V> *LastValInScope;
+
+public:
+  ScopedHashTableScope(ScopedHashTable<K, V, KInfo, AllocatorTy> &HT);
+  ScopedHashTableScope(ScopedHashTableScope &) = delete;
+  ScopedHashTableScope &operator=(ScopedHashTableScope &) = delete;
+  ~ScopedHashTableScope();
+
+  ScopedHashTableScope *getParentScope() { return PrevScope; }
+  const ScopedHashTableScope *getParentScope() const { return PrevScope; }
+
+private:
+  friend class ScopedHashTable<K, V, KInfo, AllocatorTy>;
+
+  ScopedHashTableVal<K, V> *getLastValInScope() {
+    return LastValInScope;
+  }
+
+  void setLastValInScope(ScopedHashTableVal<K, V> *Val) {
+    LastValInScope = Val;
+  }
+};
+
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>>
+class ScopedHashTableIterator {
+  ScopedHashTableVal<K, V> *Node;
+
+public:
+  ScopedHashTableIterator(ScopedHashTableVal<K, V> *node) : Node(node) {}
+
+  V &operator*() const {
+    assert(Node && "Dereference end()");
+    return Node->getValue();
+  }
+  V *operator->() const {
+    return &Node->getValue();
+  }
+
+  bool operator==(const ScopedHashTableIterator &RHS) const {
+    return Node == RHS.Node;
+  }
+  bool operator!=(const ScopedHashTableIterator &RHS) const {
+    return Node != RHS.Node;
+  }
+
+  inline ScopedHashTableIterator& operator++() {          // Preincrement
+    assert(Node && "incrementing past end()");
+    Node = Node->getNextForKey();
+    return *this;
+  }
+  ScopedHashTableIterator operator++(int) {        // Postincrement
+    ScopedHashTableIterator tmp = *this; ++*this; return tmp;
+  }
+};
+
+template <typename K, typename V, typename KInfo, typename AllocatorTy>
+class ScopedHashTable {
+public:
+  /// ScopeTy - This is a helpful typedef that allows clients to get easy access
+  /// to the name of the scope for this hash table.
+  using ScopeTy = ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
+  using size_type = unsigned;
+
+private:
+  friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
+
+  using ValTy = ScopedHashTableVal<K, V>;
+
+  DenseMap<K, ValTy*, KInfo> TopLevelMap;
+  ScopeTy *CurScope = nullptr;
+
+  AllocatorTy Allocator;
+
+public:
+  ScopedHashTable() = default;
+  ScopedHashTable(AllocatorTy A) : Allocator(A) {}
+  ScopedHashTable(const ScopedHashTable &) = delete;
+  ScopedHashTable &operator=(const ScopedHashTable &) = delete;
+
+  ~ScopedHashTable() {
+    assert(!CurScope && TopLevelMap.empty() && "Scope imbalance!");
+  }
+
+  /// Access to the allocator.
+  AllocatorTy &getAllocator() { return Allocator; }
+  const AllocatorTy &getAllocator() const { return Allocator; }
+
+  /// Return 1 if the specified key is in the table, 0 otherwise.
+  size_type count(const K &Key) const {
+    return TopLevelMap.count(Key);
+  }
+
+  V lookup(const K &Key) const {
+    auto I = TopLevelMap.find(Key);
+    if (I != TopLevelMap.end())
+      return I->second->getValue();
+
+    return V();
+  }
+
+  void insert(const K &Key, const V &Val) {
+    insertIntoScope(CurScope, Key, Val);
+  }
+
+  using iterator = ScopedHashTableIterator<K, V, KInfo>;
+
+  iterator end() { return iterator(nullptr); }
+
+  iterator begin(const K &Key) {
+    typename DenseMap<K, ValTy*, KInfo>::iterator I =
+      TopLevelMap.find(Key);
+    if (I == TopLevelMap.end()) return end();
+    return iterator(I->second);
+  }
+
+  ScopeTy *getCurScope() { return CurScope; }
+  const ScopeTy *getCurScope() const { return CurScope; }
+
+  /// insertIntoScope - This inserts the specified key/value at the specified
+  /// (possibly not the current) scope.  While it is ok to insert into a scope
+  /// that isn't the current one, it isn't ok to insert *underneath* an existing
+  /// value of the specified key.
+  void insertIntoScope(ScopeTy *S, const K &Key, const V &Val) {
+    assert(S && "No scope active!");
+    ScopedHashTableVal<K, V> *&KeyEntry = TopLevelMap[Key];
+    KeyEntry = ValTy::Create(S->getLastValInScope(), KeyEntry, Key, Val,
+                             Allocator);
+    S->setLastValInScope(KeyEntry);
+  }
+};
+
+/// ScopedHashTableScope ctor - Install this as the current scope for the hash
+/// table.
+template <typename K, typename V, typename KInfo, typename Allocator>
+ScopedHashTableScope<K, V, KInfo, Allocator>::
+  ScopedHashTableScope(ScopedHashTable<K, V, KInfo, Allocator> &ht) : HT(ht) {
+  PrevScope = HT.CurScope;
+  HT.CurScope = this;
+  LastValInScope = nullptr;
+}
+
+template <typename K, typename V, typename KInfo, typename Allocator>
+ScopedHashTableScope<K, V, KInfo, Allocator>::~ScopedHashTableScope() {
+  assert(HT.CurScope == this && "Scope imbalance!");
+  HT.CurScope = PrevScope;
+
+  // Pop and delete all values corresponding to this scope.
+  while (ScopedHashTableVal<K, V> *ThisEntry = LastValInScope) {
+    // Pop this value out of the TopLevelMap.
+    if (!ThisEntry->getNextForKey()) {
+      assert(HT.TopLevelMap[ThisEntry->getKey()] == ThisEntry &&
+             "Scope imbalance!");
+      HT.TopLevelMap.erase(ThisEntry->getKey());
+    } else {
+      ScopedHashTableVal<K, V> *&KeyEntry = HT.TopLevelMap[ThisEntry->getKey()];
+      assert(KeyEntry == ThisEntry && "Scope imbalance!");
+      KeyEntry = ThisEntry->getNextForKey();
+    }
+
+    // Pop this value out of the scope.
+    LastValInScope = ThisEntry->getNextInScope();
+
+    // Delete this entry.
+    ThisEntry->Destroy(HT.getAllocator());
+  }
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SCOPEDHASHTABLE_H
diff --git a/linux-x64/clang/include/llvm/ADT/Sequence.h b/linux-x64/clang/include/llvm/ADT/Sequence.h
new file mode 100644
index 0000000..3d4a897
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Sequence.h
@@ -0,0 +1,84 @@
+//===- Sequence.h - Utility for producing sequences of values ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This routine provides some synthesis utilities to produce sequences of
+/// values. The names are intentionally kept very short as they tend to occur
+/// in common and widely used contexts.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SEQUENCE_H
+#define LLVM_ADT_SEQUENCE_H
+
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include <algorithm>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+namespace detail {
+
+template <typename ValueT>
+class value_sequence_iterator
+    : public iterator_facade_base<value_sequence_iterator<ValueT>,
+                                  std::random_access_iterator_tag,
+                                  const ValueT> {
+  using BaseT = typename value_sequence_iterator::iterator_facade_base;
+
+  ValueT Value;
+
+public:
+  using difference_type = typename BaseT::difference_type;
+  using reference = typename BaseT::reference;
+
+  value_sequence_iterator() = default;
+  value_sequence_iterator(const value_sequence_iterator &) = default;
+  value_sequence_iterator(value_sequence_iterator &&Arg)
+      : Value(std::move(Arg.Value)) {}
+
+  template <typename U, typename Enabler = decltype(ValueT(std::declval<U>()))>
+  value_sequence_iterator(U &&Value) : Value(std::forward<U>(Value)) {}
+
+  value_sequence_iterator &operator+=(difference_type N) {
+    Value += N;
+    return *this;
+  }
+  value_sequence_iterator &operator-=(difference_type N) {
+    Value -= N;
+    return *this;
+  }
+  using BaseT::operator-;
+  difference_type operator-(const value_sequence_iterator &RHS) const {
+    return Value - RHS.Value;
+  }
+
+  bool operator==(const value_sequence_iterator &RHS) const {
+    return Value == RHS.Value;
+  }
+  bool operator<(const value_sequence_iterator &RHS) const {
+    return Value < RHS.Value;
+  }
+
+  reference operator*() const { return Value; }
+};
+
+} // end namespace detail
+
+template <typename ValueT>
+iterator_range<detail::value_sequence_iterator<ValueT>> seq(ValueT Begin,
+                                                            ValueT End) {
+  return make_range(detail::value_sequence_iterator<ValueT>(Begin),
+                    detail::value_sequence_iterator<ValueT>(End));
+}
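+
+// A minimal sketch: seq produces the half-open interval [Begin, End).
+//
+//   for (int I : seq(0, 4))
+//     (void)I; // visits 0, 1, 2, 3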
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SEQUENCE_H
diff --git a/linux-x64/clang/include/llvm/ADT/SetOperations.h b/linux-x64/clang/include/llvm/ADT/SetOperations.h
new file mode 100644
index 0000000..7c9f2fb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SetOperations.h
@@ -0,0 +1,71 @@
+//===-- llvm/ADT/SetOperations.h - Generic Set Operations -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines generic set operations that may be used on sets of
+// different types and with different element types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SETOPERATIONS_H
+#define LLVM_ADT_SETOPERATIONS_H
+
+namespace llvm {
+
+/// set_union(A, B) - Compute A := A u B, return whether A changed.
+///
+template <class S1Ty, class S2Ty>
+bool set_union(S1Ty &S1, const S2Ty &S2) {
+  bool Changed = false;
+
+  for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end();
+       SI != SE; ++SI)
+    if (S1.insert(*SI).second)
+      Changed = true;
+
+  return Changed;
+}
+
+/// set_intersect(A, B) - Compute A := A ^ B
+/// Identical to set_intersection, except that it works on set<>'s and
+/// is nicer to use.  Functionally, this iterates through S1, removing
+/// elements that are not contained in S2.
+///
+template <class S1Ty, class S2Ty>
+void set_intersect(S1Ty &S1, const S2Ty &S2) {
+   for (typename S1Ty::iterator I = S1.begin(); I != S1.end();) {
+     const auto &E = *I;
+     ++I;
+     if (!S2.count(E)) S1.erase(E);   // Erase element if not in S2
+   }
+}
+
+/// set_difference(A, B) - Return A - B
+///
+template <class S1Ty, class S2Ty>
+S1Ty set_difference(const S1Ty &S1, const S2Ty &S2) {
+  S1Ty Result;
+  for (typename S1Ty::const_iterator SI = S1.begin(), SE = S1.end();
+       SI != SE; ++SI)
+    if (!S2.count(*SI))       // if the element is not in set2
+      Result.insert(*SI);
+  return Result;
+}
+
+/// set_subtract(A, B) - Compute A := A - B
+///
+template <class S1Ty, class S2Ty>
+void set_subtract(S1Ty &S1, const S2Ty &S2) {
+  for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end();
+       SI != SE; ++SI)
+    S1.erase(*SI);
+}
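+
+// A minimal sketch of the calling convention shared by these helpers:
+//
+//   std::set<int> A = {1, 2}, B = {2, 3};
+//   std::set<int> D = set_difference(A, B); // D == {1}
+//   set_union(A, B);                        // A == {1, 2, 3}, returns true
+//   set_intersect(A, B);                    // now A == {2, 3}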
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/SetVector.h b/linux-x64/clang/include/llvm/ADT/SetVector.h
new file mode 100644
index 0000000..04ed52f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SetVector.h
@@ -0,0 +1,312 @@
+//===- llvm/ADT/SetVector.h - Set with insert order iteration ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a set that has insertion order iteration
+// characteristics. This is useful for keeping a set of things that need to be
+// visited later but in a deterministic order (insertion order). The interface
+// is purposefully minimal.
+//
+// This file defines SetVector and SmallSetVector, the latter of which performs
+// no heap allocations while it holds fewer than a certain number of elements.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SETVECTOR_H
+#define LLVM_ADT_SETVECTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <vector>
+
+namespace llvm {
+
+/// \brief A vector that has set insertion semantics.
+///
+/// This adapter class provides a way to keep a set of things that also has the
+/// property of a deterministic iteration order. The order of iteration is the
+/// order of insertion.
+template <typename T, typename Vector = std::vector<T>,
+          typename Set = DenseSet<T>>
+class SetVector {
+public:
+  using value_type = T;
+  using key_type = T;
+  using reference = T&;
+  using const_reference = const T&;
+  using set_type = Set;
+  using vector_type = Vector;
+  using iterator = typename vector_type::const_iterator;
+  using const_iterator = typename vector_type::const_iterator;
+  using reverse_iterator = typename vector_type::const_reverse_iterator;
+  using const_reverse_iterator = typename vector_type::const_reverse_iterator;
+  using size_type = typename vector_type::size_type;
+
+  /// \brief Construct an empty SetVector
+  SetVector() = default;
+
+  /// \brief Initialize a SetVector with a range of elements
+  template<typename It>
+  SetVector(It Start, It End) {
+    insert(Start, End);
+  }
+
+  ArrayRef<T> getArrayRef() const { return vector_; }
+
+  /// Clear the SetVector and return the underlying vector.
+  Vector takeVector() {
+    set_.clear();
+    return std::move(vector_);
+  }
+
+  /// \brief Determine if the SetVector is empty or not.
+  bool empty() const {
+    return vector_.empty();
+  }
+
+  /// \brief Determine the number of elements in the SetVector.
+  size_type size() const {
+    return vector_.size();
+  }
+
+  /// \brief Get an iterator to the beginning of the SetVector.
+  iterator begin() {
+    return vector_.begin();
+  }
+
+  /// \brief Get a const_iterator to the beginning of the SetVector.
+  const_iterator begin() const {
+    return vector_.begin();
+  }
+
+  /// \brief Get an iterator to the end of the SetVector.
+  iterator end() {
+    return vector_.end();
+  }
+
+  /// \brief Get a const_iterator to the end of the SetVector.
+  const_iterator end() const {
+    return vector_.end();
+  }
+
+  /// \brief Get a reverse_iterator to the end of the SetVector.
+  reverse_iterator rbegin() {
+    return vector_.rbegin();
+  }
+
+  /// \brief Get a const_reverse_iterator to the end of the SetVector.
+  const_reverse_iterator rbegin() const {
+    return vector_.rbegin();
+  }
+
+  /// \brief Get a reverse_iterator to the beginning of the SetVector.
+  reverse_iterator rend() {
+    return vector_.rend();
+  }
+
+  /// \brief Get a const_reverse_iterator to the beginning of the SetVector.
+  const_reverse_iterator rend() const {
+    return vector_.rend();
+  }
+
+  /// \brief Return the first element of the SetVector.
+  const T &front() const {
+    assert(!empty() && "Cannot call front() on empty SetVector!");
+    return vector_.front();
+  }
+
+  /// \brief Return the last element of the SetVector.
+  const T &back() const {
+    assert(!empty() && "Cannot call back() on empty SetVector!");
+    return vector_.back();
+  }
+
+  /// \brief Index into the SetVector.
+  const_reference operator[](size_type n) const {
+    assert(n < vector_.size() && "SetVector access out of range!");
+    return vector_[n];
+  }
+
+  /// \brief Insert a new element into the SetVector.
+  /// \returns true if the element was inserted into the SetVector.
+  bool insert(const value_type &X) {
+    bool result = set_.insert(X).second;
+    if (result)
+      vector_.push_back(X);
+    return result;
+  }
+
+  /// \brief Insert a range of elements into the SetVector.
+  template<typename It>
+  void insert(It Start, It End) {
+    for (; Start != End; ++Start)
+      if (set_.insert(*Start).second)
+        vector_.push_back(*Start);
+  }
+
+  /// \brief Remove an item from the set vector.
+  bool remove(const value_type& X) {
+    if (set_.erase(X)) {
+      typename vector_type::iterator I = find(vector_, X);
+      assert(I != vector_.end() && "Corrupted SetVector instances!");
+      vector_.erase(I);
+      return true;
+    }
+    return false;
+  }
+
+  /// Erase a single element from the set vector.
+  /// \returns an iterator pointing to the next element that followed the
+  /// element erased. This is the end of the SetVector if the last element is
+  /// erased.
+  iterator erase(iterator I) {
+    const key_type &V = *I;
+    assert(set_.count(V) && "Corrupted SetVector instances!");
+    set_.erase(V);
+
+    // FIXME: No need to use the non-const iterator when built with
+    // std::vector::erase(const_iterator) as defined in C++11. This is for
+    // compatibility with non-standard libstdc++ up to 4.8 (fixed in 4.9).
+    auto NI = vector_.begin();
+    std::advance(NI, std::distance<iterator>(NI, I));
+
+    return vector_.erase(NI);
+  }
+
+  /// \brief Remove items from the set vector based on a predicate function.
+  ///
+  /// This is intended to be equivalent to the following code, if we could
+  /// write it:
+  ///
+  /// \code
+  ///   V.erase(remove_if(V, P), V.end());
+  /// \endcode
+  ///
+  /// However, SetVector doesn't expose non-const iterators, making any
+  /// algorithm like remove_if impossible to use.
+  ///
+  /// \returns true if any element is removed.
+  template <typename UnaryPredicate>
+  bool remove_if(UnaryPredicate P) {
+    typename vector_type::iterator I =
+        llvm::remove_if(vector_, TestAndEraseFromSet<UnaryPredicate>(P, set_));
+    if (I == vector_.end())
+      return false;
+    vector_.erase(I, vector_.end());
+    return true;
+  }
+
+  /// \brief Count the number of elements of a given key in the SetVector.
+  /// \returns 0 if the element is not in the SetVector, 1 if it is.
+  size_type count(const key_type &key) const {
+    return set_.count(key);
+  }
+
+  /// \brief Completely clear the SetVector
+  void clear() {
+    set_.clear();
+    vector_.clear();
+  }
+
+  /// \brief Remove the last element of the SetVector.
+  void pop_back() {
+    assert(!empty() && "Cannot remove an element from an empty SetVector!");
+    set_.erase(back());
+    vector_.pop_back();
+  }
+
+  LLVM_NODISCARD T pop_back_val() {
+    T Ret = back();
+    pop_back();
+    return Ret;
+  }
+
+  bool operator==(const SetVector &that) const {
+    return vector_ == that.vector_;
+  }
+
+  bool operator!=(const SetVector &that) const {
+    return vector_ != that.vector_;
+  }
+
+  /// \brief Compute This := This u S, return whether 'This' changed.
+  /// TODO: We should be able to use set_union from SetOperations.h, but
+  ///       SetVector interface is inconsistent with DenseSet.
+  template <class STy>
+  bool set_union(const STy &S) {
+    bool Changed = false;
+
+    for (typename STy::const_iterator SI = S.begin(), SE = S.end(); SI != SE;
+         ++SI)
+      if (insert(*SI))
+        Changed = true;
+
+    return Changed;
+  }
+
+  /// \brief Compute This := This - B
+  /// TODO: We should be able to use set_subtract from SetOperations.h, but
+  ///       SetVector interface is inconsistent with DenseSet.
+  template <class STy>
+  void set_subtract(const STy &S) {
+    for (typename STy::const_iterator SI = S.begin(), SE = S.end(); SI != SE;
+         ++SI)
+      remove(*SI);
+  }
+
+private:
+  /// \brief A wrapper predicate designed for use with std::remove_if.
+  ///
+  /// This predicate wraps a predicate suitable for use with std::remove_if to
+  /// call set_.erase(x) on each element which is slated for removal.
+  template <typename UnaryPredicate>
+  class TestAndEraseFromSet {
+    UnaryPredicate P;
+    set_type &set_;
+
+  public:
+    TestAndEraseFromSet(UnaryPredicate P, set_type &set_)
+        : P(std::move(P)), set_(set_) {}
+
+    template <typename ArgumentT>
+    bool operator()(const ArgumentT &Arg) {
+      if (P(Arg)) {
+        set_.erase(Arg);
+        return true;
+      }
+      return false;
+    }
+  };
+
+  set_type set_;         ///< The set.
+  vector_type vector_;   ///< The vector.
+};
+
+/// \brief A SetVector that performs no allocations if smaller than
+/// a certain size.
+template <typename T, unsigned N>
+class SmallSetVector
+    : public SetVector<T, SmallVector<T, N>, SmallDenseSet<T, N>> {
+public:
+  SmallSetVector() = default;
+
+  /// \brief Initialize a SmallSetVector with a range of elements
+  template<typename It>
+  SmallSetVector(It Start, It End) {
+    this->insert(Start, End);
+  }
+};
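+
+// A minimal sketch: duplicates are dropped, and iteration follows insertion
+// order.
+//
+//   SmallSetVector<int, 4> SV;
+//   SV.insert(3);
+//   SV.insert(1);
+//   SV.insert(3); // ignored; SV iterates as 3, 1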
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SETVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/SmallBitVector.h b/linux-x64/clang/include/llvm/ADT/SmallBitVector.h
new file mode 100644
index 0000000..b639174
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SmallBitVector.h
@@ -0,0 +1,702 @@
+//===- llvm/ADT/SmallBitVector.h - 'Normally small' bit vectors -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SmallBitVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLBITVECTOR_H
+#define LLVM_ADT_SMALLBITVECTOR_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+/// This is a 'bitvector' (really, a variable-sized bit array), optimized for
+/// the case when the array is small. It contains one pointer-sized field, which
+/// is directly used as a plain collection of bits when possible, or as a
+/// pointer to a larger heap-allocated array when necessary. This allows normal
+/// "small" cases to be fast without losing generality for large inputs.
+class SmallBitVector {
+  // TODO: In "large" mode, a pointer to a BitVector is used, leading to an
+  // unnecessary level of indirection. It would be more efficient to use a
+  // pointer to memory containing size, allocation size, and the array of bits.
+  uintptr_t X = 1;
+
+  enum {
+    // The number of bits in this class.
+    NumBaseBits = sizeof(uintptr_t) * CHAR_BIT,
+
+    // One bit is used to discriminate between small and large mode. The
+    // remaining bits are used for the small-mode representation.
+    SmallNumRawBits = NumBaseBits - 1,
+
+    // A few more bits are used to store the size of the bit set in small mode.
+    // Theoretically this is a ceil-log2. These bits are encoded in the most
+    // significant bits of the raw bits.
+    SmallNumSizeBits = (NumBaseBits == 32 ? 5 :
+                        NumBaseBits == 64 ? 6 :
+                        SmallNumRawBits),
+
+    // The remaining bits are used to store the actual set in small mode.
+    SmallNumDataBits = SmallNumRawBits - SmallNumSizeBits
+  };
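+
+  // For example, on a 64-bit host: NumBaseBits = 64, SmallNumRawBits = 63,
+  // SmallNumSizeBits = 6, and SmallNumDataBits = 57, so up to 57 bits are
+  // stored inline before switching to a heap-allocated BitVector.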
+
+  static_assert(NumBaseBits == 64 || NumBaseBits == 32,
+                "Unsupported word size");
+
+public:
+  using size_type = unsigned;
+
+  // Encapsulation of a single bit.
+  class reference {
+    SmallBitVector &TheVector;
+    unsigned BitPos;
+
+  public:
+    reference(SmallBitVector &b, unsigned Idx) : TheVector(b), BitPos(Idx) {}
+
+    reference(const reference&) = default;
+
+    reference& operator=(reference t) {
+      *this = bool(t);
+      return *this;
+    }
+
+    reference& operator=(bool t) {
+      if (t)
+        TheVector.set(BitPos);
+      else
+        TheVector.reset(BitPos);
+      return *this;
+    }
+
+    operator bool() const {
+      return const_cast<const SmallBitVector &>(TheVector).operator[](BitPos);
+    }
+  };
+
+private:
+  bool isSmall() const {
+    return X & uintptr_t(1);
+  }
+
+  BitVector *getPointer() const {
+    assert(!isSmall());
+    return reinterpret_cast<BitVector *>(X);
+  }
+
+  void switchToSmall(uintptr_t NewSmallBits, size_t NewSize) {
+    X = 1;
+    setSmallSize(NewSize);
+    setSmallBits(NewSmallBits);
+  }
+
+  void switchToLarge(BitVector *BV) {
+    X = reinterpret_cast<uintptr_t>(BV);
+    assert(!isSmall() && "Tried to use an unaligned pointer");
+  }
+
+  // Return all the bits used for the "small" representation; this includes
+  // bits for the size as well as the element bits.
+  uintptr_t getSmallRawBits() const {
+    assert(isSmall());
+    return X >> 1;
+  }
+
+  void setSmallRawBits(uintptr_t NewRawBits) {
+    assert(isSmall());
+    X = (NewRawBits << 1) | uintptr_t(1);
+  }
+
+  // Return the size.
+  size_t getSmallSize() const { return getSmallRawBits() >> SmallNumDataBits; }
+
+  void setSmallSize(size_t Size) {
+    setSmallRawBits(getSmallBits() | (Size << SmallNumDataBits));
+  }
+
+  // Return the element bits.
+  uintptr_t getSmallBits() const {
+    return getSmallRawBits() & ~(~uintptr_t(0) << getSmallSize());
+  }
+
+  void setSmallBits(uintptr_t NewBits) {
+    setSmallRawBits((NewBits & ~(~uintptr_t(0) << getSmallSize())) |
+                    (getSmallSize() << SmallNumDataBits));
+  }
+
+public:
+  /// Creates an empty bitvector.
+  SmallBitVector() = default;
+
+  /// Creates a bitvector of specified number of bits. All bits are initialized
+  /// to the specified value.
+  explicit SmallBitVector(unsigned s, bool t = false) {
+    if (s <= SmallNumDataBits)
+      switchToSmall(t ? ~uintptr_t(0) : 0, s);
+    else
+      switchToLarge(new BitVector(s, t));
+  }
+
+  /// SmallBitVector copy ctor.
+  SmallBitVector(const SmallBitVector &RHS) {
+    if (RHS.isSmall())
+      X = RHS.X;
+    else
+      switchToLarge(new BitVector(*RHS.getPointer()));
+  }
+
+  SmallBitVector(SmallBitVector &&RHS) : X(RHS.X) {
+    RHS.X = 1;
+  }
+
+  ~SmallBitVector() {
+    if (!isSmall())
+      delete getPointer();
+  }
+
+  using const_set_bits_iterator = const_set_bits_iterator_impl<SmallBitVector>;
+  using set_iterator = const_set_bits_iterator;
+
+  const_set_bits_iterator set_bits_begin() const {
+    return const_set_bits_iterator(*this);
+  }
+
+  const_set_bits_iterator set_bits_end() const {
+    return const_set_bits_iterator(*this, -1);
+  }
+
+  iterator_range<const_set_bits_iterator> set_bits() const {
+    return make_range(set_bits_begin(), set_bits_end());
+  }
+
+  /// Tests whether there are no bits in this bitvector.
+  bool empty() const {
+    return isSmall() ? getSmallSize() == 0 : getPointer()->empty();
+  }
+
+  /// Returns the number of bits in this bitvector.
+  size_t size() const {
+    return isSmall() ? getSmallSize() : getPointer()->size();
+  }
+
+  /// Returns the number of bits which are set.
+  size_type count() const {
+    if (isSmall()) {
+      uintptr_t Bits = getSmallBits();
+      return countPopulation(Bits);
+    }
+    return getPointer()->count();
+  }
+
+  /// Returns true if any bit is set.
+  bool any() const {
+    if (isSmall())
+      return getSmallBits() != 0;
+    return getPointer()->any();
+  }
+
+  /// Returns true if all bits are set.
+  bool all() const {
+    if (isSmall())
+      return getSmallBits() == (uintptr_t(1) << getSmallSize()) - 1;
+    return getPointer()->all();
+  }
+
+  /// Returns true if none of the bits are set.
+  bool none() const {
+    if (isSmall())
+      return getSmallBits() == 0;
+    return getPointer()->none();
+  }
+
+  /// Returns the index of the first set bit, -1 if none of the bits are set.
+  int find_first() const {
+    if (isSmall()) {
+      uintptr_t Bits = getSmallBits();
+      if (Bits == 0)
+        return -1;
+      return countTrailingZeros(Bits);
+    }
+    return getPointer()->find_first();
+  }
+
+  int find_last() const {
+    if (isSmall()) {
+      uintptr_t Bits = getSmallBits();
+      if (Bits == 0)
+        return -1;
+      return NumBaseBits - countLeadingZeros(Bits) - 1;
+    }
+    return getPointer()->find_last();
+  }
+
+  /// Returns the index of the first unset bit, -1 if all of the bits are set.
+  int find_first_unset() const {
+    if (isSmall()) {
+      if (count() == getSmallSize())
+        return -1;
+
+      uintptr_t Bits = getSmallBits();
+      return countTrailingOnes(Bits);
+    }
+    return getPointer()->find_first_unset();
+  }
+
+  int find_last_unset() const {
+    if (isSmall()) {
+      if (count() == getSmallSize())
+        return -1;
+
+      uintptr_t Bits = getSmallBits();
+      // Set the unused high-order bits so the leading-ones count skips them.
+      Bits |= ~uintptr_t(0) << getSmallSize();
+      return NumBaseBits - countLeadingOnes(Bits) - 1;
+    }
+    return getPointer()->find_last_unset();
+  }
+
+  /// Returns the index of the next set bit following the "Prev" bit.
+  /// Returns -1 if the next set bit is not found.
+  int find_next(unsigned Prev) const {
+    if (isSmall()) {
+      uintptr_t Bits = getSmallBits();
+      // Mask off previous bits.
+      Bits &= ~uintptr_t(0) << (Prev + 1);
+      if (Bits == 0 || Prev + 1 >= getSmallSize())
+        return -1;
+      return countTrailingZeros(Bits);
+    }
+    return getPointer()->find_next(Prev);
+  }
+
+  /// Returns the index of the next unset bit following the "Prev" bit.
+  /// Returns -1 if the next unset bit is not found.
+  int find_next_unset(unsigned Prev) const {
+    if (isSmall()) {
+      ++Prev;
+      uintptr_t Bits = getSmallBits();
+      // Mask in previous bits.
+      uintptr_t Mask = (1 << Prev) - 1;
+      Bits |= Mask;
+
+      if (Bits == ~uintptr_t(0) || Prev + 1 >= getSmallSize())
+        return -1;
+      return countTrailingOnes(Bits);
+    }
+    return getPointer()->find_next_unset(Prev);
+  }
+
+  /// find_prev - Returns the index of the first set bit that precedes the
+  /// bit at \p PriorTo.  Returns -1 if all previous bits are unset.
+  int find_prev(unsigned PriorTo) const {
+    if (isSmall()) {
+      if (PriorTo == 0)
+        return -1;
+
+      --PriorTo;
+      uintptr_t Bits = getSmallBits();
+      Bits &= maskTrailingOnes<uintptr_t>(PriorTo + 1);
+      if (Bits == 0)
+        return -1;
+
+      return NumBaseBits - countLeadingZeros(Bits) - 1;
+    }
+    return getPointer()->find_prev(PriorTo);
+  }
+
+  /// Clear all bits.
+  void clear() {
+    if (!isSmall())
+      delete getPointer();
+    switchToSmall(0, 0);
+  }
+
+  /// Grow or shrink the bitvector.
+  void resize(unsigned N, bool t = false) {
+    if (!isSmall()) {
+      getPointer()->resize(N, t);
+    } else if (SmallNumDataBits >= N) {
+      uintptr_t NewBits = t ? ~uintptr_t(0) << getSmallSize() : 0;
+      setSmallSize(N);
+      setSmallBits(NewBits | getSmallBits());
+    } else {
+      BitVector *BV = new BitVector(N, t);
+      uintptr_t OldBits = getSmallBits();
+      for (size_t i = 0, e = getSmallSize(); i != e; ++i)
+        (*BV)[i] = (OldBits >> i) & 1;
+      switchToLarge(BV);
+    }
+  }
+
+  void reserve(unsigned N) {
+    if (isSmall()) {
+      if (N > SmallNumDataBits) {
+        uintptr_t OldBits = getSmallRawBits();
+        size_t SmallSize = getSmallSize();
+        BitVector *BV = new BitVector(SmallSize);
+        for (size_t i = 0; i < SmallSize; ++i)
+          if ((OldBits >> i) & 1)
+            BV->set(i);
+        BV->reserve(N);
+        switchToLarge(BV);
+      }
+    } else {
+      getPointer()->reserve(N);
+    }
+  }
+
+  // Set, reset, flip
+  SmallBitVector &set() {
+    if (isSmall())
+      setSmallBits(~uintptr_t(0));
+    else
+      getPointer()->set();
+    return *this;
+  }
+
+  SmallBitVector &set(unsigned Idx) {
+    if (isSmall()) {
+      assert(Idx <= static_cast<unsigned>(
+                        std::numeric_limits<uintptr_t>::digits) &&
+             "undefined behavior");
+      setSmallBits(getSmallBits() | (uintptr_t(1) << Idx));
+    } else
+      getPointer()->set(Idx);
+    return *this;
+  }
+
+  /// Efficiently set a range of bits in [I, E)
+  SmallBitVector &set(unsigned I, unsigned E) {
+    assert(I <= E && "Attempted to set backwards range!");
+    assert(E <= size() && "Attempted to set out-of-bounds range!");
+    if (I == E) return *this;
+    if (isSmall()) {
+      uintptr_t EMask = ((uintptr_t)1) << E;
+      uintptr_t IMask = ((uintptr_t)1) << I;
+      uintptr_t Mask = EMask - IMask;
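+      // e.g. I=2, E=5: EMask - IMask == 0b100000 - 0b000100 == 0b011100,
+      // which is exactly the bits in the half-open range [2, 5).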
+      setSmallBits(getSmallBits() | Mask);
+    } else
+      getPointer()->set(I, E);
+    return *this;
+  }
+
+  SmallBitVector &reset() {
+    if (isSmall())
+      setSmallBits(0);
+    else
+      getPointer()->reset();
+    return *this;
+  }
+
+  SmallBitVector &reset(unsigned Idx) {
+    if (isSmall())
+      setSmallBits(getSmallBits() & ~(uintptr_t(1) << Idx));
+    else
+      getPointer()->reset(Idx);
+    return *this;
+  }
+
+  /// Efficiently reset a range of bits in [I, E)
+  SmallBitVector &reset(unsigned I, unsigned E) {
+    assert(I <= E && "Attempted to reset backwards range!");
+    assert(E <= size() && "Attempted to reset out-of-bounds range!");
+    if (I == E) return *this;
+    if (isSmall()) {
+      uintptr_t EMask = ((uintptr_t)1) << E;
+      uintptr_t IMask = ((uintptr_t)1) << I;
+      uintptr_t Mask = EMask - IMask;
+      setSmallBits(getSmallBits() & ~Mask);
+    } else
+      getPointer()->reset(I, E);
+    return *this;
+  }
+
+  SmallBitVector &flip() {
+    if (isSmall())
+      setSmallBits(~getSmallBits());
+    else
+      getPointer()->flip();
+    return *this;
+  }
+
+  SmallBitVector &flip(unsigned Idx) {
+    if (isSmall())
+      setSmallBits(getSmallBits() ^ (uintptr_t(1) << Idx));
+    else
+      getPointer()->flip(Idx);
+    return *this;
+  }
+
+  // No argument flip.
+  SmallBitVector operator~() const {
+    return SmallBitVector(*this).flip();
+  }
+
+  // Indexing.
+  reference operator[](unsigned Idx) {
+    assert(Idx < size() && "Out-of-bounds Bit access.");
+    return reference(*this, Idx);
+  }
+
+  bool operator[](unsigned Idx) const {
+    assert(Idx < size() && "Out-of-bounds Bit access.");
+    if (isSmall())
+      return ((getSmallBits() >> Idx) & 1) != 0;
+    return getPointer()->operator[](Idx);
+  }
+
+  bool test(unsigned Idx) const {
+    return (*this)[Idx];
+  }
+
+  /// Test if any common bits are set.
+  bool anyCommon(const SmallBitVector &RHS) const {
+    if (isSmall() && RHS.isSmall())
+      return (getSmallBits() & RHS.getSmallBits()) != 0;
+    if (!isSmall() && !RHS.isSmall())
+      return getPointer()->anyCommon(*RHS.getPointer());
+
+    for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+      if (test(i) && RHS.test(i))
+        return true;
+    return false;
+  }
+
+  // Comparison operators.
+  bool operator==(const SmallBitVector &RHS) const {
+    if (size() != RHS.size())
+      return false;
+    if (isSmall())
+      return getSmallBits() == RHS.getSmallBits();
+    else
+      return *getPointer() == *RHS.getPointer();
+  }
+
+  bool operator!=(const SmallBitVector &RHS) const {
+    return !(*this == RHS);
+  }
+
+  // Intersection, union, disjoint union.
+  SmallBitVector &operator&=(const SmallBitVector &RHS) {
+    resize(std::max(size(), RHS.size()));
+    if (isSmall())
+      setSmallBits(getSmallBits() & RHS.getSmallBits());
+    else if (!RHS.isSmall())
+      getPointer()->operator&=(*RHS.getPointer());
+    else {
+      SmallBitVector Copy = RHS;
+      Copy.resize(size());
+      getPointer()->operator&=(*Copy.getPointer());
+    }
+    return *this;
+  }
+
+  /// Reset bits that are set in RHS. Same as *this &= ~RHS.
+  SmallBitVector &reset(const SmallBitVector &RHS) {
+    if (isSmall() && RHS.isSmall())
+      setSmallBits(getSmallBits() & ~RHS.getSmallBits());
+    else if (!isSmall() && !RHS.isSmall())
+      getPointer()->reset(*RHS.getPointer());
+    else
+      for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+        if (RHS.test(i))
+          reset(i);
+
+    return *this;
+  }
+
+  /// Check whether (This - RHS) is nonzero. This is the same as reset(RHS)
+  /// followed by any().
+  bool test(const SmallBitVector &RHS) const {
+    if (isSmall() && RHS.isSmall())
+      return (getSmallBits() & ~RHS.getSmallBits()) != 0;
+    if (!isSmall() && !RHS.isSmall())
+      return getPointer()->test(*RHS.getPointer());
+
+    unsigned i, e;
+    for (i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+      if (test(i) && !RHS.test(i))
+        return true;
+
+    for (e = size(); i != e; ++i)
+      if (test(i))
+        return true;
+
+    return false;
+  }
+
+  SmallBitVector &operator|=(const SmallBitVector &RHS) {
+    resize(std::max(size(), RHS.size()));
+    if (isSmall())
+      setSmallBits(getSmallBits() | RHS.getSmallBits());
+    else if (!RHS.isSmall())
+      getPointer()->operator|=(*RHS.getPointer());
+    else {
+      SmallBitVector Copy = RHS;
+      Copy.resize(size());
+      getPointer()->operator|=(*Copy.getPointer());
+    }
+    return *this;
+  }
+
+  SmallBitVector &operator^=(const SmallBitVector &RHS) {
+    resize(std::max(size(), RHS.size()));
+    if (isSmall())
+      setSmallBits(getSmallBits() ^ RHS.getSmallBits());
+    else if (!RHS.isSmall())
+      getPointer()->operator^=(*RHS.getPointer());
+    else {
+      SmallBitVector Copy = RHS;
+      Copy.resize(size());
+      getPointer()->operator^=(*Copy.getPointer());
+    }
+    return *this;
+  }
+
+  SmallBitVector &operator<<=(unsigned N) {
+    if (isSmall())
+      setSmallBits(getSmallBits() << N);
+    else
+      getPointer()->operator<<=(N);
+    return *this;
+  }
+
+  SmallBitVector &operator>>=(unsigned N) {
+    if (isSmall())
+      setSmallBits(getSmallBits() >> N);
+    else
+      getPointer()->operator>>=(N);
+    return *this;
+  }
+
+  // Assignment operator.
+  const SmallBitVector &operator=(const SmallBitVector &RHS) {
+    if (isSmall()) {
+      if (RHS.isSmall())
+        X = RHS.X;
+      else
+        switchToLarge(new BitVector(*RHS.getPointer()));
+    } else {
+      if (!RHS.isSmall())
+        *getPointer() = *RHS.getPointer();
+      else {
+        delete getPointer();
+        X = RHS.X;
+      }
+    }
+    return *this;
+  }
+
+  const SmallBitVector &operator=(SmallBitVector &&RHS) {
+    if (this != &RHS) {
+      clear();
+      swap(RHS);
+    }
+    return *this;
+  }
+
+  void swap(SmallBitVector &RHS) {
+    std::swap(X, RHS.X);
+  }
+
+  /// Add '1' bits from Mask to this vector. Don't resize.
+  /// This computes "*this |= Mask".
+  void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    if (isSmall())
+      applyMask<true, false>(Mask, MaskWords);
+    else
+      getPointer()->setBitsInMask(Mask, MaskWords);
+  }
+
+  /// Clear any bits in this vector that are set in Mask. Don't resize.
+  /// This computes "*this &= ~Mask".
+  void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    if (isSmall())
+      applyMask<false, false>(Mask, MaskWords);
+    else
+      getPointer()->clearBitsInMask(Mask, MaskWords);
+  }
+
+  /// Add a bit to this vector for every '0' bit in Mask. Don't resize.
+  /// This computes "*this |= ~Mask".
+  void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    if (isSmall())
+      applyMask<true, true>(Mask, MaskWords);
+    else
+      getPointer()->setBitsNotInMask(Mask, MaskWords);
+  }
+
+  /// Clear a bit in this vector for every '0' bit in Mask. Don't resize.
+  /// This computes "*this &= Mask".
+  void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    if (isSmall())
+      applyMask<false, true>(Mask, MaskWords);
+    else
+      getPointer()->clearBitsNotInMask(Mask, MaskWords);
+  }
+
+private:
+  template <bool AddBits, bool InvertMask>
+  void applyMask(const uint32_t *Mask, unsigned MaskWords) {
+    assert(MaskWords <= sizeof(uintptr_t) && "Mask is larger than base!");
+    uintptr_t M = Mask[0];
+    if (NumBaseBits == 64)
+      M |= uint64_t(Mask[1]) << 32;
+    if (InvertMask)
+      M = ~M;
+    if (AddBits)
+      setSmallBits(getSmallBits() | M);
+    else
+      setSmallBits(getSmallBits() & ~M);
+  }
+};
+
+inline SmallBitVector
+operator&(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+  SmallBitVector Result(LHS);
+  Result &= RHS;
+  return Result;
+}
+
+inline SmallBitVector
+operator|(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+  SmallBitVector Result(LHS);
+  Result |= RHS;
+  return Result;
+}
+
+inline SmallBitVector
+operator^(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+  SmallBitVector Result(LHS);
+  Result ^= RHS;
+  return Result;
+}
+
+} // end namespace llvm
+
+namespace std {
+
+/// Implement std::swap in terms of SmallBitVector swap.
+inline void
+swap(llvm::SmallBitVector &LHS, llvm::SmallBitVector &RHS) {
+  LHS.swap(RHS);
+}
+
+} // end namespace std
+
+#endif // LLVM_ADT_SMALLBITVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/SmallPtrSet.h b/linux-x64/clang/include/llvm/ADT/SmallPtrSet.h
new file mode 100644
index 0000000..78ea613
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SmallPtrSet.h
@@ -0,0 +1,486 @@
+//===- llvm/ADT/SmallPtrSet.h - 'Normally small' pointer set ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SmallPtrSet class.  See the doxygen comment for
+// SmallPtrSetImplBase for more details on the algorithm used.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLPTRSET_H
+#define LLVM_ADT_SMALLPTRSET_H
+
+#include "llvm/ADT/EpochTracker.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ReverseIteration.h"
+#include "llvm/Support/type_traits.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+/// SmallPtrSetImplBase - This is the common code shared among all the
+/// SmallPtrSet<>'s, which is almost everything.  SmallPtrSet has two modes, one
+/// for small and one for large sets.
+///
+/// Small sets use an array of pointers allocated in the SmallPtrSet object,
+/// which is treated as a simple array of pointers.  When a pointer is added to
+/// the set, the array is scanned to see if the element already exists, if not
+/// the element is 'pushed back' onto the array.  If we run out of space in the
+/// array, we grow into the 'large set' case.  SmallSet should be used when the
+/// sets are often small.  In this case, no memory allocation is used, and only
+/// light-weight and cache-efficient scanning is used.
+///
+/// Large sets use a classic exponentially-probed hash table.  Empty buckets are
+/// represented with an illegal pointer value (-1) to allow null pointers to be
+/// inserted.  Tombstones are represented with another illegal pointer value
+/// (-2), to allow deletion.  The hash table is resized when the table is 3/4 or
+/// more full.  When this happens, the table is doubled in size.
+///
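+/// A short usage sketch of the derived SmallPtrSet (illustrative only, not
+/// part of the original header):
+/// \code
+///   int A, B;
+///   SmallPtrSet<int *, 4> S;   // up to 4 pointers stored inline
+///   S.insert(&A);              // linear scan while in small mode
+///   if (!S.count(&B))
+///     S.insert(&B);
+///   S.erase(&A);               // replaces the slot with a tombstone
+/// \endcode
+///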
+class SmallPtrSetImplBase : public DebugEpochBase {
+  friend class SmallPtrSetIteratorImpl;
+
+protected:
+  /// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
+  const void **SmallArray;
+  /// CurArray - This is the current set of buckets.  If equal to SmallArray,
+  /// then the set is in 'small mode'.
+  const void **CurArray;
+  /// CurArraySize - The allocated size of CurArray, always a power of two.
+  unsigned CurArraySize;
+
+  /// Number of elements in CurArray that contain a value or are a tombstone.
+  /// If small, all these elements are at the beginning of CurArray and the rest
+  /// is uninitialized.
+  unsigned NumNonEmpty;
+  /// Number of tombstones in CurArray.
+  unsigned NumTombstones;
+
+  // Helpers to copy and move construct a SmallPtrSet.
+  SmallPtrSetImplBase(const void **SmallStorage,
+                      const SmallPtrSetImplBase &that);
+  SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize,
+                      SmallPtrSetImplBase &&that);
+
+  explicit SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize)
+      : SmallArray(SmallStorage), CurArray(SmallStorage),
+        CurArraySize(SmallSize), NumNonEmpty(0), NumTombstones(0) {
+    assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
+           "Initial size must be a power of two!");
+  }
+
+  ~SmallPtrSetImplBase() {
+    if (!isSmall())
+      free(CurArray);
+  }
+
+public:
+  using size_type = unsigned;
+
+  SmallPtrSetImplBase &operator=(const SmallPtrSetImplBase &) = delete;
+
+  LLVM_NODISCARD bool empty() const { return size() == 0; }
+  size_type size() const { return NumNonEmpty - NumTombstones; }
+
+  void clear() {
+    incrementEpoch();
+    // If the capacity of the array is huge, and the # elements used is small,
+    // shrink the array.
+    if (!isSmall()) {
+      if (size() * 4 < CurArraySize && CurArraySize > 32)
+        return shrink_and_clear();
+      // Fill the array with empty markers.
+      memset(CurArray, -1, CurArraySize * sizeof(void *));
+    }
+
+    NumNonEmpty = 0;
+    NumTombstones = 0;
+  }
+
+protected:
+  static void *getTombstoneMarker() { return reinterpret_cast<void*>(-2); }
+
+  static void *getEmptyMarker() {
+    // Note that -1 is chosen to make clear() efficiently implementable with
+    // memset and because it's not a valid pointer value.
+    return reinterpret_cast<void*>(-1);
+  }
+
+  const void **EndPointer() const {
+    return isSmall() ? CurArray + NumNonEmpty : CurArray + CurArraySize;
+  }
+
+  /// insert_imp - This returns true if the pointer was new to the set, false if
+  /// it was already in the set.  This is hidden from the client so that the
+  /// derived class can check that the right type of pointer is passed in.
+  std::pair<const void *const *, bool> insert_imp(const void *Ptr) {
+    if (isSmall()) {
+      // Check to see if it is already in the set.
+      const void **LastTombstone = nullptr;
+      for (const void **APtr = SmallArray, **E = SmallArray + NumNonEmpty;
+           APtr != E; ++APtr) {
+        const void *Value = *APtr;
+        if (Value == Ptr)
+          return std::make_pair(APtr, false);
+        if (Value == getTombstoneMarker())
+          LastTombstone = APtr;
+      }
+
+      // Did we find any tombstone marker?
+      if (LastTombstone != nullptr) {
+        *LastTombstone = Ptr;
+        --NumTombstones;
+        incrementEpoch();
+        return std::make_pair(LastTombstone, true);
+      }
+
+      // Nope, there isn't.  If we stay small, just 'pushback' now.
+      if (NumNonEmpty < CurArraySize) {
+        SmallArray[NumNonEmpty++] = Ptr;
+        incrementEpoch();
+        return std::make_pair(SmallArray + (NumNonEmpty - 1), true);
+      }
+      // Otherwise, hit the big set case, which will call grow.
+    }
+    return insert_imp_big(Ptr);
+  }
+
+  /// erase_imp - If the set contains the specified pointer, remove it and
+  /// return true, otherwise return false.  This is hidden from the client so
+  /// that the derived class can check that the right type of pointer is passed
+  /// in.
+  bool erase_imp(const void * Ptr) {
+    const void *const *P = find_imp(Ptr);
+    if (P == EndPointer())
+      return false;
+
+    const void **Loc = const_cast<const void **>(P);
+    assert(*Loc == Ptr && "broken find!");
+    *Loc = getTombstoneMarker();
+    NumTombstones++;
+    return true;
+  }
+
+  /// Returns the raw pointer needed to construct an iterator.  If the element
+  /// is not found, this will be EndPointer.  Otherwise, it will be a pointer
+  /// to the slot which stores Ptr.
+  const void *const * find_imp(const void * Ptr) const {
+    if (isSmall()) {
+      // Linear search for the item.
+      for (const void *const *APtr = SmallArray,
+                      *const *E = SmallArray + NumNonEmpty; APtr != E; ++APtr)
+        if (*APtr == Ptr)
+          return APtr;
+      return EndPointer();
+    }
+
+    // Big set case.
+    auto *Bucket = FindBucketFor(Ptr);
+    if (*Bucket == Ptr)
+      return Bucket;
+    return EndPointer();
+  }
+
+private:
+  bool isSmall() const { return CurArray == SmallArray; }
+
+  std::pair<const void *const *, bool> insert_imp_big(const void *Ptr);
+
+  const void * const *FindBucketFor(const void *Ptr) const;
+  void shrink_and_clear();
+
+  /// Grow - Allocate a larger backing store for the buckets and move it over.
+  void Grow(unsigned NewSize);
+
+protected:
+  /// swap - Swaps the elements of two sets.
+  /// Note: This method assumes that both sets have the same small size.
+  void swap(SmallPtrSetImplBase &RHS);
+
+  void CopyFrom(const SmallPtrSetImplBase &RHS);
+  void MoveFrom(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
+
+private:
+  /// Code shared by MoveFrom() and move constructor.
+  void MoveHelper(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
+  /// Code shared by CopyFrom() and copy constructor.
+  void CopyHelper(const SmallPtrSetImplBase &RHS);
+};
+
+/// SmallPtrSetIteratorImpl - This is the common base class shared between all
+/// instances of SmallPtrSetIterator.
+class SmallPtrSetIteratorImpl {
+protected:
+  const void *const *Bucket;
+  const void *const *End;
+
+public:
+  explicit SmallPtrSetIteratorImpl(const void *const *BP, const void*const *E)
+    : Bucket(BP), End(E) {
+    if (shouldReverseIterate()) {
+      RetreatIfNotValid();
+      return;
+    }
+    AdvanceIfNotValid();
+  }
+
+  bool operator==(const SmallPtrSetIteratorImpl &RHS) const {
+    return Bucket == RHS.Bucket;
+  }
+  bool operator!=(const SmallPtrSetIteratorImpl &RHS) const {
+    return Bucket != RHS.Bucket;
+  }
+
+protected:
+  /// AdvanceIfNotValid - If the current bucket isn't valid, advance to a bucket
+  /// that is.   This is guaranteed to stop because the end() bucket is marked
+  /// valid.
+  void AdvanceIfNotValid() {
+    assert(Bucket <= End);
+    while (Bucket != End &&
+           (*Bucket == SmallPtrSetImplBase::getEmptyMarker() ||
+            *Bucket == SmallPtrSetImplBase::getTombstoneMarker()))
+      ++Bucket;
+  }
+  void RetreatIfNotValid() {
+    assert(Bucket >= End);
+    while (Bucket != End &&
+           (Bucket[-1] == SmallPtrSetImplBase::getEmptyMarker() ||
+            Bucket[-1] == SmallPtrSetImplBase::getTombstoneMarker())) {
+      --Bucket;
+    }
+  }
+};
+
+/// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet.
+template <typename PtrTy>
+class SmallPtrSetIterator : public SmallPtrSetIteratorImpl,
+                            DebugEpochBase::HandleBase {
+  using PtrTraits = PointerLikeTypeTraits<PtrTy>;
+
+public:
+  using value_type = PtrTy;
+  using reference = PtrTy;
+  using pointer = PtrTy;
+  using difference_type = std::ptrdiff_t;
+  using iterator_category = std::forward_iterator_tag;
+
+  explicit SmallPtrSetIterator(const void *const *BP, const void *const *E,
+                               const DebugEpochBase &Epoch)
+      : SmallPtrSetIteratorImpl(BP, E), DebugEpochBase::HandleBase(&Epoch) {}
+
+  // Most methods are provided by the base class.
+
+  const PtrTy operator*() const {
+    assert(isHandleInSync() && "invalid iterator access!");
+    if (shouldReverseIterate()) {
+      assert(Bucket > End);
+      return PtrTraits::getFromVoidPointer(const_cast<void *>(Bucket[-1]));
+    }
+    assert(Bucket < End);
+    return PtrTraits::getFromVoidPointer(const_cast<void*>(*Bucket));
+  }
+
+  inline SmallPtrSetIterator& operator++() {          // Preincrement
+    assert(isHandleInSync() && "invalid iterator access!");
+    if (shouldReverseIterate()) {
+      --Bucket;
+      RetreatIfNotValid();
+      return *this;
+    }
+    ++Bucket;
+    AdvanceIfNotValid();
+    return *this;
+  }
+
+  SmallPtrSetIterator operator++(int) {        // Postincrement
+    SmallPtrSetIterator tmp = *this;
+    ++*this;
+    return tmp;
+  }
+};
+
+/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
+/// power of two (which means N itself if N is already a power of two).
+template<unsigned N>
+struct RoundUpToPowerOfTwo;
+
+/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it.  This is a
+/// helper template used to implement RoundUpToPowerOfTwo.
+template<unsigned N, bool isPowerTwo>
+struct RoundUpToPowerOfTwoH {
+  enum { Val = N };
+};
+template<unsigned N>
+struct RoundUpToPowerOfTwoH<N, false> {
+  enum {
+    // We could just use NextVal = N+1, but this converges faster.  N|(N-1) sets
+    // the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
+    Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
+  };
+};
+
+template<unsigned N>
+struct RoundUpToPowerOfTwo {
+  enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
+};
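+
+// For example, RoundUpToPowerOfTwo<5>::Val is 8: since (5 & 4) != 0, the
+// helper computes (5 | 4) + 1 == 8, and 8 then satisfies (8 & 7) == 0.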
+
+/// \brief A templated base class for \c SmallPtrSet which provides the
+/// typesafe interface that is common across all small sizes.
+///
+/// This is particularly useful for passing around between interface boundaries
+/// to avoid encoding a particular small size in the interface boundary.
+template <typename PtrType>
+class SmallPtrSetImpl : public SmallPtrSetImplBase {
+  using ConstPtrType = typename add_const_past_pointer<PtrType>::type;
+  using PtrTraits = PointerLikeTypeTraits<PtrType>;
+  using ConstPtrTraits = PointerLikeTypeTraits<ConstPtrType>;
+
+protected:
+  // Constructors that forward to the base.
+  SmallPtrSetImpl(const void **SmallStorage, const SmallPtrSetImpl &that)
+      : SmallPtrSetImplBase(SmallStorage, that) {}
+  SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize,
+                  SmallPtrSetImpl &&that)
+      : SmallPtrSetImplBase(SmallStorage, SmallSize, std::move(that)) {}
+  explicit SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize)
+      : SmallPtrSetImplBase(SmallStorage, SmallSize) {}
+
+public:
+  using iterator = SmallPtrSetIterator<PtrType>;
+  using const_iterator = SmallPtrSetIterator<PtrType>;
+  using key_type = ConstPtrType;
+  using value_type = PtrType;
+
+  SmallPtrSetImpl(const SmallPtrSetImpl &) = delete;
+
+  /// Inserts Ptr if and only if there is no element in the container equal to
+  /// Ptr. The bool component of the returned pair is true if and only if the
+  /// insertion takes place, and the iterator component of the pair points to
+  /// the element equal to Ptr.
+  std::pair<iterator, bool> insert(PtrType Ptr) {
+    auto p = insert_imp(PtrTraits::getAsVoidPointer(Ptr));
+    return std::make_pair(makeIterator(p.first), p.second);
+  }
+
+  /// erase - If the set contains the specified pointer, remove it and return
+  /// true, otherwise return false.
+  bool erase(PtrType Ptr) {
+    return erase_imp(PtrTraits::getAsVoidPointer(Ptr));
+  }
+  /// count - Return 1 if the specified pointer is in the set, 0 otherwise.
+  size_type count(ConstPtrType Ptr) const { return find(Ptr) != end() ? 1 : 0; }
+  iterator find(ConstPtrType Ptr) const {
+    return makeIterator(find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)));
+  }
+
+  template <typename IterT>
+  void insert(IterT I, IterT E) {
+    for (; I != E; ++I)
+      insert(*I);
+  }
+
+  void insert(std::initializer_list<PtrType> IL) {
+    insert(IL.begin(), IL.end());
+  }
+
+  iterator begin() const {
+    if (shouldReverseIterate())
+      return makeIterator(EndPointer() - 1);
+    return makeIterator(CurArray);
+  }
+  iterator end() const { return makeIterator(EndPointer()); }
+
+private:
+  /// Create an iterator that dereferences to same place as the given pointer.
+  iterator makeIterator(const void *const *P) const {
+    if (shouldReverseIterate())
+      return iterator(P == EndPointer() ? CurArray : P + 1, CurArray, *this);
+    return iterator(P, EndPointer(), *this);
+  }
+};
+
+/// SmallPtrSet - This class implements a set which is optimized for holding
+/// SmallSize or less elements.  This internally rounds up SmallSize to the next
+/// power of two if it is not already a power of two.  See the comments above
+/// SmallPtrSetImplBase for details of the algorithm.
+template<class PtrType, unsigned SmallSize>
+class SmallPtrSet : public SmallPtrSetImpl<PtrType> {
+  // In small mode SmallPtrSet uses linear search for the elements, so it is
+  // not a good idea to choose this value too high. You may consider using a
+  // DenseSet<> instead if you expect many elements in the set.
+  static_assert(SmallSize <= 32, "SmallSize should be small");
+
+  using BaseT = SmallPtrSetImpl<PtrType>;
+
+  // Make sure that SmallSize is a power of two, round up if not.
+  enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
+  /// SmallStorage - Fixed size storage used in 'small mode'.
+  const void *SmallStorage[SmallSizePowTwo];
+
+public:
+  SmallPtrSet() : BaseT(SmallStorage, SmallSizePowTwo) {}
+  SmallPtrSet(const SmallPtrSet &that) : BaseT(SmallStorage, that) {}
+  SmallPtrSet(SmallPtrSet &&that)
+      : BaseT(SmallStorage, SmallSizePowTwo, std::move(that)) {}
+
+  template<typename It>
+  SmallPtrSet(It I, It E) : BaseT(SmallStorage, SmallSizePowTwo) {
+    this->insert(I, E);
+  }
+
+  SmallPtrSet(std::initializer_list<PtrType> IL)
+      : BaseT(SmallStorage, SmallSizePowTwo) {
+    this->insert(IL.begin(), IL.end());
+  }
+
+  SmallPtrSet<PtrType, SmallSize> &
+  operator=(const SmallPtrSet<PtrType, SmallSize> &RHS) {
+    if (&RHS != this)
+      this->CopyFrom(RHS);
+    return *this;
+  }
+
+  SmallPtrSet<PtrType, SmallSize> &
+  operator=(SmallPtrSet<PtrType, SmallSize> &&RHS) {
+    if (&RHS != this)
+      this->MoveFrom(SmallSizePowTwo, std::move(RHS));
+    return *this;
+  }
+
+  SmallPtrSet<PtrType, SmallSize> &
+  operator=(std::initializer_list<PtrType> IL) {
+    this->clear();
+    this->insert(IL.begin(), IL.end());
+    return *this;
+  }
+
+  /// swap - Swaps the elements of two sets.
+  void swap(SmallPtrSet<PtrType, SmallSize> &RHS) {
+    SmallPtrSetImplBase::swap(RHS);
+  }
+};
+
+} // end namespace llvm
+
+namespace std {
+
+  /// Implement std::swap in terms of SmallPtrSet swap.
+  template<class T, unsigned N>
+  inline void swap(llvm::SmallPtrSet<T, N> &LHS, llvm::SmallPtrSet<T, N> &RHS) {
+    LHS.swap(RHS);
+  }
+
+} // end namespace std
+
+#endif // LLVM_ADT_SMALLPTRSET_H
diff --git a/linux-x64/clang/include/llvm/ADT/SmallSet.h b/linux-x64/clang/include/llvm/ADT/SmallSet.h
new file mode 100644
index 0000000..d52d0f0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SmallSet.h
@@ -0,0 +1,142 @@
+//===- llvm/ADT/SmallSet.h - 'Normally small' sets --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SmallSet class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLSET_H
+#define LLVM_ADT_SMALLSET_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include <cstddef>
+#include <functional>
+#include <set>
+#include <utility>
+
+namespace llvm {
+
+/// SmallSet - This maintains a set of unique values, optimizing for the case
+/// when the set is small (fewer than N elements).  In this case, the set can be
+/// maintained with no mallocs.  If the set gets large, we expand to using an
+/// std::set to maintain reasonable lookup times.
+///
+/// Note that this set does not provide a way to iterate over members in the
+/// set.
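+///
+/// A brief usage sketch (illustrative only, not part of the original header):
+/// \code
+///   SmallSet<int, 4> S;   // elements stay in a SmallVector until more than
+///                         // four are inserted, then spill into a std::set
+///   S.insert(1);
+///   S.insert(2);
+///   bool Inserted = S.insert(2).second;  // false: 2 is already present
+///   if (S.count(1))
+///     S.erase(1);
+/// \endcode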
+template <typename T, unsigned N, typename C = std::less<T>>
+class SmallSet {
+  /// Use a SmallVector to hold the elements here (even though it will never
+  /// reach its 'large' stage) to avoid calling the default ctors of elements
+  /// we will never use.
+  SmallVector<T, N> Vector;
+  std::set<T, C> Set;
+
+  using VIterator = typename SmallVector<T, N>::const_iterator;
+  using mutable_iterator = typename SmallVector<T, N>::iterator;
+
+  // In small mode SmallSet uses linear search for the elements, so it is
+  // not a good idea to choose this value too high. You may consider using a
+  // DenseSet<> instead if you expect many elements in the set.
+  static_assert(N <= 32, "N should be small");
+
+public:
+  using size_type = size_t;
+
+  SmallSet() = default;
+
+  LLVM_NODISCARD bool empty() const {
+    return Vector.empty() && Set.empty();
+  }
+
+  size_type size() const {
+    return isSmall() ? Vector.size() : Set.size();
+  }
+
+  /// count - Return 1 if the element is in the set, 0 otherwise.
+  size_type count(const T &V) const {
+    if (isSmall()) {
+      // Since the collection is small, just do a linear search.
+      return vfind(V) == Vector.end() ? 0 : 1;
+    } else {
+      return Set.count(V);
+    }
+  }
+
+  /// insert - Insert an element into the set if it isn't already there.
+  /// Returns true if the element is inserted (it was not in the set before).
+  /// The first value of the returned pair is unused and provided for
+  /// partial compatibility with the standard library self-associative container
+  /// concept.
+  // FIXME: Add iterators that abstract over the small and large form, and then
+  // return those here.
+  std::pair<NoneType, bool> insert(const T &V) {
+    if (!isSmall())
+      return std::make_pair(None, Set.insert(V).second);
+
+    VIterator I = vfind(V);
+    if (I != Vector.end())    // Don't reinsert if it already exists.
+      return std::make_pair(None, false);
+    if (Vector.size() < N) {
+      Vector.push_back(V);
+      return std::make_pair(None, true);
+    }
+
+    // Otherwise, grow from vector to set.
+    while (!Vector.empty()) {
+      Set.insert(Vector.back());
+      Vector.pop_back();
+    }
+    Set.insert(V);
+    return std::make_pair(None, true);
+  }
+
+  template <typename IterT>
+  void insert(IterT I, IterT E) {
+    for (; I != E; ++I)
+      insert(*I);
+  }
+
+  bool erase(const T &V) {
+    if (!isSmall())
+      return Set.erase(V);
+    for (mutable_iterator I = Vector.begin(), E = Vector.end(); I != E; ++I)
+      if (*I == V) {
+        Vector.erase(I);
+        return true;
+      }
+    return false;
+  }
+
+  void clear() {
+    Vector.clear();
+    Set.clear();
+  }
+
+private:
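+  // Elements live in Vector only until the first spill into Set; after that
+  // the container stays in 'large' mode (until clear()) and Vector is empty.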
+  bool isSmall() const { return Set.empty(); }
+
+  VIterator vfind(const T &V) const {
+    for (VIterator I = Vector.begin(), E = Vector.end(); I != E; ++I)
+      if (*I == V)
+        return I;
+    return Vector.end();
+  }
+};
+
+/// If this set is of pointer values, transparently switch over to using
+/// SmallPtrSet for performance.
+template <typename PointeeType, unsigned N>
+class SmallSet<PointeeType*, N> : public SmallPtrSet<PointeeType*, N> {};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SMALLSET_H
diff --git a/linux-x64/clang/include/llvm/ADT/SmallString.h b/linux-x64/clang/include/llvm/ADT/SmallString.h
new file mode 100644
index 0000000..ff46e85
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SmallString.h
@@ -0,0 +1,297 @@
+//===- llvm/ADT/SmallString.h - 'Normally small' strings --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SmallString class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLSTRING_H
+#define LLVM_ADT_SMALLSTRING_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include <cstddef>
+
+namespace llvm {
+
+/// SmallString - A SmallString is just a SmallVector with methods and accessors
+/// that make it work better as a string (e.g. operator+ etc).
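+///
+/// A brief usage sketch (illustrative only, not part of the original header):
+/// \code
+///   SmallString<64> Path("foo");   // stored inline, no heap allocation
+///   Path += '/';
+///   Path += "bar.txt";
+///   bool IsTxt = Path.endswith(".txt");   // true
+///   StringRef R = Path.str();      // cheap, non-owning view of the data
+/// \endcode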
+template<unsigned InternalLen>
+class SmallString : public SmallVector<char, InternalLen> {
+public:
+  /// Default ctor - Initialize to empty.
+  SmallString() = default;
+
+  /// Initialize from a StringRef.
+  SmallString(StringRef S) : SmallVector<char, InternalLen>(S.begin(), S.end()) {}
+
+  /// Initialize with a range.
+  template<typename ItTy>
+  SmallString(ItTy S, ItTy E) : SmallVector<char, InternalLen>(S, E) {}
+
+  // Note that in order to add new overloads for append & assign, we have to
+  // duplicate the inherited versions so as not to inadvertently hide them.
+
+  /// @}
+  /// @name String Assignment
+  /// @{
+
+  /// Assign from a repeated element.
+  void assign(size_t NumElts, char Elt) {
+    this->SmallVectorImpl<char>::assign(NumElts, Elt);
+  }
+
+  /// Assign from an iterator pair.
+  template<typename in_iter>
+  void assign(in_iter S, in_iter E) {
+    this->clear();
+    SmallVectorImpl<char>::append(S, E);
+  }
+
+  /// Assign from a StringRef.
+  void assign(StringRef RHS) {
+    this->clear();
+    SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+  }
+
+  /// Assign from a SmallVector.
+  void assign(const SmallVectorImpl<char> &RHS) {
+    this->clear();
+    SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+  }
+
+  /// @}
+  /// @name String Concatenation
+  /// @{
+
+  /// Append from an iterator pair.
+  template<typename in_iter>
+  void append(in_iter S, in_iter E) {
+    SmallVectorImpl<char>::append(S, E);
+  }
+
+  void append(size_t NumInputs, char Elt) {
+    SmallVectorImpl<char>::append(NumInputs, Elt);
+  }
+
+  /// Append from a StringRef.
+  void append(StringRef RHS) {
+    SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+  }
+
+  /// Append from a SmallVector.
+  void append(const SmallVectorImpl<char> &RHS) {
+    SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+  }
+
+  /// @}
+  /// @name String Comparison
+  /// @{
+
+  /// Check for string equality.  This is more efficient than compare() when
+  /// the relative ordering of unequal strings isn't needed.
+  bool equals(StringRef RHS) const {
+    return str().equals(RHS);
+  }
+
+  /// Check for string equality, ignoring case.
+  bool equals_lower(StringRef RHS) const {
+    return str().equals_lower(RHS);
+  }
+
+  /// Compare two strings; the result is -1, 0, or 1 if this string is
+  /// lexicographically less than, equal to, or greater than the \p RHS.
+  int compare(StringRef RHS) const {
+    return str().compare(RHS);
+  }
+
+  /// compare_lower - Compare two strings, ignoring case.
+  int compare_lower(StringRef RHS) const {
+    return str().compare_lower(RHS);
+  }
+
+  /// compare_numeric - Compare two strings, treating sequences of digits as
+  /// numbers.
+  int compare_numeric(StringRef RHS) const {
+    return str().compare_numeric(RHS);
+  }
+
+  /// @}
+  /// @name String Predicates
+  /// @{
+
+  /// startswith - Check if this string starts with the given \p Prefix.
+  bool startswith(StringRef Prefix) const {
+    return str().startswith(Prefix);
+  }
+
+  /// endswith - Check if this string ends with the given \p Suffix.
+  bool endswith(StringRef Suffix) const {
+    return str().endswith(Suffix);
+  }
+
+  /// @}
+  /// @name String Searching
+  /// @{
+
+  /// find - Search for the first character \p C in the string.
+  ///
+  /// \return - The index of the first occurrence of \p C, or npos if not
+  /// found.
+  size_t find(char C, size_t From = 0) const {
+    return str().find(C, From);
+  }
+
+  /// Search for the first string \p Str in the string.
+  ///
+  /// \returns The index of the first occurrence of \p Str, or npos if not
+  /// found.
+  size_t find(StringRef Str, size_t From = 0) const {
+    return str().find(Str, From);
+  }
+
+  /// Search for the last character \p C in the string.
+  ///
+  /// \returns The index of the last occurrence of \p C, or npos if not
+  /// found.
+  size_t rfind(char C, size_t From = StringRef::npos) const {
+    return str().rfind(C, From);
+  }
+
+  /// Search for the last string \p Str in the string.
+  ///
+  /// \returns The index of the last occurrence of \p Str, or npos if not
+  /// found.
+  size_t rfind(StringRef Str) const {
+    return str().rfind(Str);
+  }
+
+  /// Find the first character in the string that is \p C, or npos if not
+  /// found. Same as find.
+  size_t find_first_of(char C, size_t From = 0) const {
+    return str().find_first_of(C, From);
+  }
+
+  /// Find the first character in the string that is in \p Chars, or npos if
+  /// not found.
+  ///
+  /// Complexity: O(size() + Chars.size())
+  size_t find_first_of(StringRef Chars, size_t From = 0) const {
+    return str().find_first_of(Chars, From);
+  }
+
+  /// Find the first character in the string that is not \p C, or npos if not
+  /// found.
+  size_t find_first_not_of(char C, size_t From = 0) const {
+    return str().find_first_not_of(C, From);
+  }
+
+  /// Find the first character in the string that is not in the string
+  /// \p Chars, or npos if not found.
+  ///
+  /// Complexity: O(size() + Chars.size())
+  size_t find_first_not_of(StringRef Chars, size_t From = 0) const {
+    return str().find_first_not_of(Chars, From);
+  }
+
+  /// Find the last character in the string that is \p C, or npos if not
+  /// found.
+  size_t find_last_of(char C, size_t From = StringRef::npos) const {
+    return str().find_last_of(C, From);
+  }
+
+  /// Find the last character in the string that is in \p Chars, or npos if
+  /// not found.
+  ///
+  /// Complexity: O(size() + Chars.size())
+  size_t find_last_of(
+      StringRef Chars, size_t From = StringRef::npos) const {
+    return str().find_last_of(Chars, From);
+  }
+
+  /// @}
+  /// @name Helpful Algorithms
+  /// @{
+
+  /// Return the number of occurrences of \p C in the string.
+  size_t count(char C) const {
+    return str().count(C);
+  }
+
+  /// Return the number of non-overlapped occurrences of \p Str in the
+  /// string.
+  size_t count(StringRef Str) const {
+    return str().count(Str);
+  }
+
+  /// @}
+  /// @name Substring Operations
+  /// @{
+
+  /// Return a reference to the substring from [Start, Start + N).
+  ///
+  /// \param Start The index of the starting character in the substring; if
+  /// the index is npos or greater than the length of the string then the
+  /// empty substring will be returned.
+  ///
+  /// \param N The number of characters to include in the substring. If \p N
+  /// exceeds the number of characters remaining in the string, the string
+  /// suffix (starting with \p Start) will be returned.
+  StringRef substr(size_t Start, size_t N = StringRef::npos) const {
+    return str().substr(Start, N);
+  }
+
+  /// Return a reference to the substring from [Start, End).
+  ///
+  /// \param Start The index of the starting character in the substring; if
+  /// the index is npos or greater than the length of the string then the
+  /// empty substring will be returned.
+  ///
+  /// \param End The index following the last character to include in the
+  /// substring. If this is npos, or less than \p Start, or exceeds the
+  /// number of characters remaining in the string, the string suffix
+  /// (starting with \p Start) will be returned.
+  StringRef slice(size_t Start, size_t End) const {
+    return str().slice(Start, End);
+  }
+
+  // Extra methods.
+
+  /// Explicit conversion to StringRef.
+  StringRef str() const { return StringRef(this->begin(), this->size()); }
+
+  // TODO: Make this const, if it's safe...
+  const char* c_str() {
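+    // Append a NUL and pop it again: the terminator stays in the buffer one
+    // past size(), so data() can be passed to C APIs expecting a C string.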
+    this->push_back(0);
+    this->pop_back();
+    return this->data();
+  }
+
+  /// Implicit conversion to StringRef.
+  operator StringRef() const { return str(); }
+
+  // Extra operators.
+  const SmallString &operator=(StringRef RHS) {
+    this->clear();
+    return *this += RHS;
+  }
+
+  SmallString &operator+=(StringRef RHS) {
+    this->append(RHS.begin(), RHS.end());
+    return *this;
+  }
+  SmallString &operator+=(char C) {
+    this->push_back(C);
+    return *this;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SMALLSTRING_H
diff --git a/linux-x64/clang/include/llvm/ADT/SmallVector.h b/linux-x64/clang/include/llvm/ADT/SmallVector.h
new file mode 100644
index 0000000..3d17e70
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SmallVector.h
@@ -0,0 +1,958 @@
+//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SmallVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLVECTOR_H
+#define LLVM_ADT_SMALLVECTOR_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/type_traits.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+/// This is all the non-templated stuff common to all SmallVectors.
+class SmallVectorBase {
+protected:
+  void *BeginX, *EndX, *CapacityX;
+
+protected:
+  SmallVectorBase(void *FirstEl, size_t Size)
+    : BeginX(FirstEl), EndX(FirstEl), CapacityX((char*)FirstEl+Size) {}
+
+  /// This is an implementation of the grow() method which only works
+  /// on POD-like data types and is out of line to reduce code duplication.
+  void grow_pod(void *FirstEl, size_t MinSizeInBytes, size_t TSize);
+
+public:
+  /// This returns size()*sizeof(T).
+  size_t size_in_bytes() const {
+    return size_t((char*)EndX - (char*)BeginX);
+  }
+
+  /// capacity_in_bytes - This returns capacity()*sizeof(T).
+  size_t capacity_in_bytes() const {
+    return size_t((char*)CapacityX - (char*)BeginX);
+  }
+
+  LLVM_NODISCARD bool empty() const { return BeginX == EndX; }
+};
+
+/// This is the part of SmallVectorTemplateBase which does not depend on whether
+/// the type T is a POD. The extra dummy template argument is used by ArrayRef
+/// to avoid unnecessarily requiring T to be complete.
+template <typename T, typename = void>
+class SmallVectorTemplateCommon : public SmallVectorBase {
+private:
+  template <typename, unsigned> friend struct SmallVectorStorage;
+
+  // Allocate raw space for N elements of type T.  If T has a ctor or dtor, we
+  // don't want it to be automatically run, so we need to represent the space as
+  // something else.  Use an array of char of sufficient alignment.
+  using U = AlignedCharArrayUnion<T>;
+  U FirstEl;
+  // Space after 'FirstEl' is clobbered; do not add any instance vars after it.
+
+protected:
+  SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(&FirstEl, Size) {}
+
+  void grow_pod(size_t MinSizeInBytes, size_t TSize) {
+    SmallVectorBase::grow_pod(&FirstEl, MinSizeInBytes, TSize);
+  }
+
+  /// Return true if this is a smallvector which has not had dynamic
+  /// memory allocated for it.
+  bool isSmall() const {
+    return BeginX == static_cast<const void*>(&FirstEl);
+  }
+
+  /// Put this vector in a state of being small.
+  void resetToSmall() {
+    BeginX = EndX = CapacityX = &FirstEl;
+  }
+
+  void setEnd(T *P) { this->EndX = P; }
+
+public:
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+  using value_type = T;
+  using iterator = T *;
+  using const_iterator = const T *;
+
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+
+  using reference = T &;
+  using const_reference = const T &;
+  using pointer = T *;
+  using const_pointer = const T *;
+
+  // forward iterator creation methods.
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  iterator begin() { return (iterator)this->BeginX; }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  const_iterator begin() const { return (const_iterator)this->BeginX; }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  iterator end() { return (iterator)this->EndX; }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  const_iterator end() const { return (const_iterator)this->EndX; }
+
+protected:
+  iterator capacity_ptr() { return (iterator)this->CapacityX; }
+  const_iterator capacity_ptr() const { return (const_iterator)this->CapacityX;}
+
+public:
+  // reverse iterator creation methods.
+  reverse_iterator rbegin()            { return reverse_iterator(end()); }
+  const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
+  reverse_iterator rend()              { return reverse_iterator(begin()); }
+  const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  size_type size() const { return end()-begin(); }
+  size_type max_size() const { return size_type(-1) / sizeof(T); }
+
+  /// Return the total number of elements in the currently allocated buffer.
+  size_t capacity() const { return capacity_ptr() - begin(); }
+
+  /// Return a pointer to the vector's buffer, even if empty().
+  pointer data() { return pointer(begin()); }
+  /// Return a pointer to the vector's buffer, even if empty().
+  const_pointer data() const { return const_pointer(begin()); }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  reference operator[](size_type idx) {
+    assert(idx < size());
+    return begin()[idx];
+  }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  const_reference operator[](size_type idx) const {
+    assert(idx < size());
+    return begin()[idx];
+  }
+
+  reference front() {
+    assert(!empty());
+    return begin()[0];
+  }
+  const_reference front() const {
+    assert(!empty());
+    return begin()[0];
+  }
+
+  reference back() {
+    assert(!empty());
+    return end()[-1];
+  }
+  const_reference back() const {
+    assert(!empty());
+    return end()[-1];
+  }
+};
+
+/// SmallVectorTemplateBase<isPodLike = false> - This is where we put method
+/// implementations that are designed to work with non-POD-like T's.
+template <typename T, bool isPodLike>
+class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
+protected:
+  SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
+
+  static void destroy_range(T *S, T *E) {
+    while (S != E) {
+      --E;
+      E->~T();
+    }
+  }
+
+  /// Move the range [I, E) into the uninitialized memory starting with "Dest",
+  /// constructing elements as needed.
+  template<typename It1, typename It2>
+  static void uninitialized_move(It1 I, It1 E, It2 Dest) {
+    std::uninitialized_copy(std::make_move_iterator(I),
+                            std::make_move_iterator(E), Dest);
+  }
+
+  /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
+  /// constructing elements as needed.
+  template<typename It1, typename It2>
+  static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
+    std::uninitialized_copy(I, E, Dest);
+  }
+
+  /// Grow the allocated memory (without initializing new elements), doubling
+  /// the size of the allocated memory. Guarantees space for at least one more
+  /// element, or MinSize more elements if specified.
+  void grow(size_t MinSize = 0);
+
+public:
+  void push_back(const T &Elt) {
+    if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+      this->grow();
+    ::new ((void*) this->end()) T(Elt);
+    this->setEnd(this->end()+1);
+  }
+
+  void push_back(T &&Elt) {
+    if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+      this->grow();
+    ::new ((void*) this->end()) T(::std::move(Elt));
+    this->setEnd(this->end()+1);
+  }
+
+  void pop_back() {
+    this->setEnd(this->end()-1);
+    this->end()->~T();
+  }
+};
+
+// Define this out-of-line to dissuade the C++ compiler from inlining it.
+template <typename T, bool isPodLike>
+void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
+  size_t CurCapacity = this->capacity();
+  size_t CurSize = this->size();
+  // Always grow, even from zero.
+  size_t NewCapacity = size_t(NextPowerOf2(CurCapacity+2));
+  if (NewCapacity < MinSize)
+    NewCapacity = MinSize;
+  T *NewElts = static_cast<T*>(malloc(NewCapacity*sizeof(T)));
+  if (NewElts == nullptr)
+    report_bad_alloc_error("Allocation of SmallVector element failed.");
+
+  // Move the elements over.
+  this->uninitialized_move(this->begin(), this->end(), NewElts);
+
+  // Destroy the original elements.
+  destroy_range(this->begin(), this->end());
+
+  // If this wasn't grown from the inline copy, deallocate the old space.
+  if (!this->isSmall())
+    free(this->begin());
+
+  this->setEnd(NewElts+CurSize);
+  this->BeginX = NewElts;
+  this->CapacityX = this->begin()+NewCapacity;
+}
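+
+// Growth is geometric: NextPowerOf2 returns the next power of two strictly
+// greater than its argument, so e.g. an inline capacity of 0 grows to 4,
+// then 8, 16, ..., keeping push_back amortized O(1).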
+
+
+/// SmallVectorTemplateBase<isPodLike = true> - This is where we put method
+/// implementations that are designed to work with POD-like T's.
+template <typename T>
+class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
+protected:
+  SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
+
+  // No need to do a destroy loop for POD's.
+  static void destroy_range(T *, T *) {}
+
+  /// Move the range [I, E) onto the uninitialized memory
+  /// starting with "Dest", constructing elements into it as needed.
+  template<typename It1, typename It2>
+  static void uninitialized_move(It1 I, It1 E, It2 Dest) {
+    // Just do a copy.
+    uninitialized_copy(I, E, Dest);
+  }
+
+  /// Copy the range [I, E) onto the uninitialized memory
+  /// starting with "Dest", constructing elements into it as needed.
+  template<typename It1, typename It2>
+  static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
+    // Arbitrary iterator types; just use the basic implementation.
+    std::uninitialized_copy(I, E, Dest);
+  }
+
+  /// Copy the range [I, E) onto the uninitialized memory
+  /// starting with "Dest", constructing elements into it as needed.
+  template <typename T1, typename T2>
+  static void uninitialized_copy(
+      T1 *I, T1 *E, T2 *Dest,
+      typename std::enable_if<std::is_same<typename std::remove_const<T1>::type,
+                                           T2>::value>::type * = nullptr) {
+    // Use memcpy for PODs iterated by pointers (which includes SmallVector
+    // iterators): std::uninitialized_copy optimizes to memmove, but we can
+    // use memcpy here. Note that I and E are iterators, so when the range is
+    // empty they may not be valid pointers to hand to memcpy; hence the
+    // I != E guard below.
+    if (I != E)
+      memcpy(Dest, I, (E - I) * sizeof(T));
+  }
+
+  /// Double the size of the allocated memory, guaranteeing space for at
+  /// least one more element or MinSize if specified.
+  void grow(size_t MinSize = 0) {
+    this->grow_pod(MinSize*sizeof(T), sizeof(T));
+  }
+
+public:
+  void push_back(const T &Elt) {
+    if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+      this->grow();
+    memcpy(this->end(), &Elt, sizeof(T));
+    this->setEnd(this->end()+1);
+  }
+
+  void pop_back() {
+    this->setEnd(this->end()-1);
+  }
+};
+
+/// This class consists of common code factored out of the SmallVector class to
+/// reduce code duplication based on the SmallVector 'N' template parameter.
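+///
+/// A common pattern (a minimal sketch; 'fill' is a hypothetical helper) is to
+/// take SmallVectorImpl<T> by reference so that callers may pass a SmallVector
+/// of any inline size N:
+/// \code
+///   void fill(SmallVectorImpl<int> &Out) { Out.push_back(42); }
+///
+///   SmallVector<int, 8> A;
+///   SmallVector<int, 32> B;
+///   fill(A);  // Both calls bind to the same N-independent interface.
+///   fill(B);
+/// \endcode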
+template <typename T>
+class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
+  using SuperClass = SmallVectorTemplateBase<T, isPodLike<T>::value>;
+
+public:
+  using iterator = typename SuperClass::iterator;
+  using const_iterator = typename SuperClass::const_iterator;
+  using size_type = typename SuperClass::size_type;
+
+protected:
+  // Default ctor - Initialize to empty.
+  explicit SmallVectorImpl(unsigned N)
+    : SmallVectorTemplateBase<T, isPodLike<T>::value>(N*sizeof(T)) {
+  }
+
+public:
+  SmallVectorImpl(const SmallVectorImpl &) = delete;
+
+  ~SmallVectorImpl() {
+    // Subclass has already destructed this vector's elements.
+    // If this wasn't grown from the inline copy, deallocate the old space.
+    if (!this->isSmall())
+      free(this->begin());
+  }
+
+  void clear() {
+    this->destroy_range(this->begin(), this->end());
+    this->EndX = this->BeginX;
+  }
+
+  void resize(size_type N) {
+    if (N < this->size()) {
+      this->destroy_range(this->begin()+N, this->end());
+      this->setEnd(this->begin()+N);
+    } else if (N > this->size()) {
+      if (this->capacity() < N)
+        this->grow(N);
+      for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
+        new (&*I) T();
+      this->setEnd(this->begin()+N);
+    }
+  }
+
+  void resize(size_type N, const T &NV) {
+    if (N < this->size()) {
+      this->destroy_range(this->begin()+N, this->end());
+      this->setEnd(this->begin()+N);
+    } else if (N > this->size()) {
+      if (this->capacity() < N)
+        this->grow(N);
+      std::uninitialized_fill(this->end(), this->begin()+N, NV);
+      this->setEnd(this->begin()+N);
+    }
+  }
+
+  void reserve(size_type N) {
+    if (this->capacity() < N)
+      this->grow(N);
+  }
+
+  LLVM_NODISCARD T pop_back_val() {
+    T Result = ::std::move(this->back());
+    this->pop_back();
+    return Result;
+  }
+
+  void swap(SmallVectorImpl &RHS);
+
+  /// Add the specified range to the end of the SmallVector.
+  template <typename in_iter,
+            typename = typename std::enable_if<std::is_convertible<
+                typename std::iterator_traits<in_iter>::iterator_category,
+                std::input_iterator_tag>::value>::type>
+  void append(in_iter in_start, in_iter in_end) {
+    size_type NumInputs = std::distance(in_start, in_end);
+    // Grow allocated space if needed.
+    if (NumInputs > size_type(this->capacity_ptr()-this->end()))
+      this->grow(this->size()+NumInputs);
+
+    // Copy the new elements over.
+    this->uninitialized_copy(in_start, in_end, this->end());
+    this->setEnd(this->end() + NumInputs);
+  }
+
+  /// Add the specified range to the end of the SmallVector.
+  void append(size_type NumInputs, const T &Elt) {
+    // Grow allocated space if needed.
+    if (NumInputs > size_type(this->capacity_ptr()-this->end()))
+      this->grow(this->size()+NumInputs);
+
+    // Copy the new elements over.
+    std::uninitialized_fill_n(this->end(), NumInputs, Elt);
+    this->setEnd(this->end() + NumInputs);
+  }
+
+  void append(std::initializer_list<T> IL) {
+    append(IL.begin(), IL.end());
+  }
+
+  // FIXME: Consider assigning over existing elements, rather than clearing &
+  // re-initializing them - for all assign(...) variants.
+
+  void assign(size_type NumElts, const T &Elt) {
+    clear();
+    if (this->capacity() < NumElts)
+      this->grow(NumElts);
+    this->setEnd(this->begin()+NumElts);
+    std::uninitialized_fill(this->begin(), this->end(), Elt);
+  }
+
+  template <typename in_iter,
+            typename = typename std::enable_if<std::is_convertible<
+                typename std::iterator_traits<in_iter>::iterator_category,
+                std::input_iterator_tag>::value>::type>
+  void assign(in_iter in_start, in_iter in_end) {
+    clear();
+    append(in_start, in_end);
+  }
+
+  void assign(std::initializer_list<T> IL) {
+    clear();
+    append(IL);
+  }
+
+  iterator erase(const_iterator CI) {
+    // Just cast away constness because this is a non-const member function.
+    iterator I = const_cast<iterator>(CI);
+
+    assert(I >= this->begin() && "Iterator to erase is out of bounds.");
+    assert(I < this->end() && "Erasing at past-the-end iterator.");
+
+    iterator N = I;
+    // Shift all elts down one.
+    std::move(I+1, this->end(), I);
+    // Drop the last elt.
+    this->pop_back();
+    return N;
+  }
+
+  iterator erase(const_iterator CS, const_iterator CE) {
+    // Just cast away constness because this is a non-const member function.
+    iterator S = const_cast<iterator>(CS);
+    iterator E = const_cast<iterator>(CE);
+
+    assert(S >= this->begin() && "Range to erase is out of bounds.");
+    assert(S <= E && "Trying to erase invalid range.");
+    assert(E <= this->end() && "Trying to erase past the end.");
+
+    iterator N = S;
+    // Shift all elts down.
+    iterator I = std::move(E, this->end(), S);
+    // Drop the last elts.
+    this->destroy_range(I, this->end());
+    this->setEnd(I);
+    return N;
+  }
+
+  iterator insert(iterator I, T &&Elt) {
+    if (I == this->end()) {  // Important special case for empty vector.
+      this->push_back(::std::move(Elt));
+      return this->end()-1;
+    }
+
+    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+    assert(I <= this->end() && "Inserting past the end of the vector.");
+
+    if (this->EndX >= this->CapacityX) {
+      size_t EltNo = I-this->begin();
+      this->grow();
+      I = this->begin()+EltNo;
+    }
+
+    ::new ((void*) this->end()) T(::std::move(this->back()));
+    // Push everything else over.
+    std::move_backward(I, this->end()-1, this->end());
+    this->setEnd(this->end()+1);
+
+    // If we just moved the element we're inserting, be sure to update
+    // the reference.
+    T *EltPtr = &Elt;
+    if (I <= EltPtr && EltPtr < this->EndX)
+      ++EltPtr;
+
+    *I = ::std::move(*EltPtr);
+    return I;
+  }
+
+  iterator insert(iterator I, const T &Elt) {
+    if (I == this->end()) {  // Important special case for empty vector.
+      this->push_back(Elt);
+      return this->end()-1;
+    }
+
+    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+    assert(I <= this->end() && "Inserting past the end of the vector.");
+
+    if (this->EndX >= this->CapacityX) {
+      size_t EltNo = I-this->begin();
+      this->grow();
+      I = this->begin()+EltNo;
+    }
+    ::new ((void*) this->end()) T(std::move(this->back()));
+    // Push everything else over.
+    std::move_backward(I, this->end()-1, this->end());
+    this->setEnd(this->end()+1);
+
+    // If we just moved the element we're inserting, be sure to update
+    // the reference.
+    const T *EltPtr = &Elt;
+    if (I <= EltPtr && EltPtr < this->EndX)
+      ++EltPtr;
+
+    *I = *EltPtr;
+    return I;
+  }
+
+  iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
+    // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+    size_t InsertElt = I - this->begin();
+
+    if (I == this->end()) {  // Important special case for empty vector.
+      append(NumToInsert, Elt);
+      return this->begin()+InsertElt;
+    }
+
+    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+    assert(I <= this->end() && "Inserting past the end of the vector.");
+
+    // Ensure there is enough space.
+    reserve(this->size() + NumToInsert);
+
+    // Recompute the iterator, since reserve() may have invalidated it.
+    I = this->begin()+InsertElt;
+
+    // If there are more elements between the insertion point and the end of the
+    // range than there are being inserted, we can use a simple approach to
+    // insertion.  Since we already reserved space, we know that this won't
+    // reallocate the vector.
+    if (size_t(this->end()-I) >= NumToInsert) {
+      T *OldEnd = this->end();
+      append(std::move_iterator<iterator>(this->end() - NumToInsert),
+             std::move_iterator<iterator>(this->end()));
+
+      // Move the existing elements that get replaced.
+      std::move_backward(I, OldEnd-NumToInsert, OldEnd);
+
+      std::fill_n(I, NumToInsert, Elt);
+      return I;
+    }
+
+    // Otherwise, we're inserting more elements than exist already, and we're
+    // not inserting at the end.
+
+    // Move over the elements that we're about to overwrite.
+    T *OldEnd = this->end();
+    this->setEnd(this->end() + NumToInsert);
+    size_t NumOverwritten = OldEnd-I;
+    this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
+
+    // Replace the overwritten part.
+    std::fill_n(I, NumOverwritten, Elt);
+
+    // Insert the non-overwritten middle part.
+    std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
+    return I;
+  }
+
+  template <typename ItTy,
+            typename = typename std::enable_if<std::is_convertible<
+                typename std::iterator_traits<ItTy>::iterator_category,
+                std::input_iterator_tag>::value>::type>
+  iterator insert(iterator I, ItTy From, ItTy To) {
+    // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+    size_t InsertElt = I - this->begin();
+
+    if (I == this->end()) {  // Important special case for empty vector.
+      append(From, To);
+      return this->begin()+InsertElt;
+    }
+
+    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+    assert(I <= this->end() && "Inserting past the end of the vector.");
+
+    size_t NumToInsert = std::distance(From, To);
+
+    // Ensure there is enough space.
+    reserve(this->size() + NumToInsert);
+
+    // Recompute the iterator, since reserve() may have invalidated it.
+    I = this->begin()+InsertElt;
+
+    // If there are more elements between the insertion point and the end of the
+    // range than there are being inserted, we can use a simple approach to
+    // insertion.  Since we already reserved space, we know that this won't
+    // reallocate the vector.
+    if (size_t(this->end()-I) >= NumToInsert) {
+      T *OldEnd = this->end();
+      append(std::move_iterator<iterator>(this->end() - NumToInsert),
+             std::move_iterator<iterator>(this->end()));
+
+      // Move the existing elements that get replaced.
+      std::move_backward(I, OldEnd-NumToInsert, OldEnd);
+
+      std::copy(From, To, I);
+      return I;
+    }
+
+    // Otherwise, we're inserting more elements than exist already, and we're
+    // not inserting at the end.
+
+    // Move over the elements that we're about to overwrite.
+    T *OldEnd = this->end();
+    this->setEnd(this->end() + NumToInsert);
+    size_t NumOverwritten = OldEnd-I;
+    this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
+
+    // Replace the overwritten part.
+    for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
+      *J = *From;
+      ++J; ++From;
+    }
+
+    // Insert the non-overwritten middle part.
+    this->uninitialized_copy(From, To, OldEnd);
+    return I;
+  }
+
+  void insert(iterator I, std::initializer_list<T> IL) {
+    insert(I, IL.begin(), IL.end());
+  }
+
+  template <typename... ArgTypes> void emplace_back(ArgTypes &&... Args) {
+    if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+      this->grow();
+    ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
+    this->setEnd(this->end() + 1);
+  }
+
+  SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
+
+  SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
+
+  bool operator==(const SmallVectorImpl &RHS) const {
+    if (this->size() != RHS.size()) return false;
+    return std::equal(this->begin(), this->end(), RHS.begin());
+  }
+  bool operator!=(const SmallVectorImpl &RHS) const {
+    return !(*this == RHS);
+  }
+
+  bool operator<(const SmallVectorImpl &RHS) const {
+    return std::lexicographical_compare(this->begin(), this->end(),
+                                        RHS.begin(), RHS.end());
+  }
+
+  /// Set the array size to \p N, which the current array must have enough
+  /// capacity for.
+  ///
+  /// This does not construct or destroy any elements in the vector.
+  ///
+  /// Clients can use this in conjunction with capacity() to write past the end
+  /// of the buffer when they know that more elements are available, and only
+  /// update the size later. This avoids the cost of value initializing elements
+  /// which will only be overwritten.
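+  ///
+  /// A minimal sketch of that pattern (illustrative only; 'produce' is a
+  /// hypothetical writer that fills at most V.capacity() elements and returns
+  /// how many it wrote):
+  /// \code
+  ///   SmallVector<int, 16> V;
+  ///   V.reserve(64);
+  ///   size_t N = produce(V.begin(), V.capacity());  // N <= V.capacity()
+  ///   V.set_size(N);  // Publish the elements written above.
+  /// \endcode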
+  void set_size(size_type N) {
+    assert(N <= this->capacity());
+    this->setEnd(this->begin() + N);
+  }
+};
+
+template <typename T>
+void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
+  if (this == &RHS) return;
+
+  // We can only avoid copying elements if neither vector is small.
+  if (!this->isSmall() && !RHS.isSmall()) {
+    std::swap(this->BeginX, RHS.BeginX);
+    std::swap(this->EndX, RHS.EndX);
+    std::swap(this->CapacityX, RHS.CapacityX);
+    return;
+  }
+  if (RHS.size() > this->capacity())
+    this->grow(RHS.size());
+  if (this->size() > RHS.capacity())
+    RHS.grow(this->size());
+
+  // Swap the shared elements.
+  size_t NumShared = this->size();
+  if (NumShared > RHS.size()) NumShared = RHS.size();
+  for (size_type i = 0; i != NumShared; ++i)
+    std::swap((*this)[i], RHS[i]);
+
+  // Copy over the extra elts.
+  if (this->size() > RHS.size()) {
+    size_t EltDiff = this->size() - RHS.size();
+    this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
+    RHS.setEnd(RHS.end()+EltDiff);
+    this->destroy_range(this->begin()+NumShared, this->end());
+    this->setEnd(this->begin()+NumShared);
+  } else if (RHS.size() > this->size()) {
+    size_t EltDiff = RHS.size() - this->size();
+    this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
+    this->setEnd(this->end() + EltDiff);
+    this->destroy_range(RHS.begin()+NumShared, RHS.end());
+    RHS.setEnd(RHS.begin()+NumShared);
+  }
+}
+
+template <typename T>
+SmallVectorImpl<T> &SmallVectorImpl<T>::
+  operator=(const SmallVectorImpl<T> &RHS) {
+  // Avoid self-assignment.
+  if (this == &RHS) return *this;
+
+  // If we already have sufficient space, assign the common elements, then
+  // destroy any excess.
+  size_t RHSSize = RHS.size();
+  size_t CurSize = this->size();
+  if (CurSize >= RHSSize) {
+    // Assign common elements.
+    iterator NewEnd;
+    if (RHSSize)
+      NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
+    else
+      NewEnd = this->begin();
+
+    // Destroy excess elements.
+    this->destroy_range(NewEnd, this->end());
+
+    // Trim.
+    this->setEnd(NewEnd);
+    return *this;
+  }
+
+  // If we have to grow to have enough elements, destroy the current elements.
+  // This allows us to avoid copying them during the grow.
+  // FIXME: don't do this if they're efficiently moveable.
+  if (this->capacity() < RHSSize) {
+    // Destroy current elements.
+    this->destroy_range(this->begin(), this->end());
+    this->setEnd(this->begin());
+    CurSize = 0;
+    this->grow(RHSSize);
+  } else if (CurSize) {
+    // Otherwise, use assignment for the already-constructed elements.
+    std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
+  }
+
+  // Copy construct the new elements in place.
+  this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
+                           this->begin()+CurSize);
+
+  // Set end.
+  this->setEnd(this->begin()+RHSSize);
+  return *this;
+}
+
+template <typename T>
+SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
+  // Avoid self-assignment.
+  if (this == &RHS) return *this;
+
+  // If the RHS isn't small, clear this vector and then steal its buffer.
+  if (!RHS.isSmall()) {
+    this->destroy_range(this->begin(), this->end());
+    if (!this->isSmall()) free(this->begin());
+    this->BeginX = RHS.BeginX;
+    this->EndX = RHS.EndX;
+    this->CapacityX = RHS.CapacityX;
+    RHS.resetToSmall();
+    return *this;
+  }
+
+  // If we already have sufficient space, assign the common elements, then
+  // destroy any excess.
+  size_t RHSSize = RHS.size();
+  size_t CurSize = this->size();
+  if (CurSize >= RHSSize) {
+    // Assign common elements.
+    iterator NewEnd = this->begin();
+    if (RHSSize)
+      NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
+
+    // Destroy excess elements and trim the bounds.
+    this->destroy_range(NewEnd, this->end());
+    this->setEnd(NewEnd);
+
+    // Clear the RHS.
+    RHS.clear();
+
+    return *this;
+  }
+
+  // If we have to grow to have enough elements, destroy the current elements.
+  // This allows us to avoid copying them during the grow.
+  // FIXME: this may not actually make any sense if we can efficiently move
+  // elements.
+  if (this->capacity() < RHSSize) {
+    // Destroy current elements.
+    this->destroy_range(this->begin(), this->end());
+    this->setEnd(this->begin());
+    CurSize = 0;
+    this->grow(RHSSize);
+  } else if (CurSize) {
+    // Otherwise, use assignment for the already-constructed elements.
+    std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
+  }
+
+  // Move-construct the new elements in place.
+  this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
+                           this->begin()+CurSize);
+
+  // Set end.
+  this->setEnd(this->begin()+RHSSize);
+
+  RHS.clear();
+  return *this;
+}
+
+/// Storage for the SmallVector elements which aren't contained in
+/// SmallVectorTemplateCommon. There are 'N-1' elements here. The remaining '1'
+/// element is in the base class. This is specialized for the N=1 and N=0 cases
+/// to avoid allocating unnecessary storage.
+template <typename T, unsigned N>
+struct SmallVectorStorage {
+  typename SmallVectorTemplateCommon<T>::U InlineElts[N - 1];
+};
+template <typename T> struct SmallVectorStorage<T, 1> {};
+template <typename T> struct SmallVectorStorage<T, 0> {};
+
+/// This is a 'vector' (really, a variable-sized array), optimized
+/// for the case when the array is small.  It contains some number of elements
+/// in-place, which allows it to avoid heap allocation when the actual number of
+/// elements is below that threshold.  This allows normal "small" cases to be
+/// fast without losing generality for large inputs.
+///
+/// Note that this does not attempt to be exception safe.
+///
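+/// A minimal usage sketch (illustrative only):
+/// \code
+///   SmallVector<int, 4> V;
+///   V.push_back(1);          // Stays in the inline storage.
+///   V.append({2, 3, 4, 5});  // Growing past 4 elements moves to the heap.
+/// \endcode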
+template <typename T, unsigned N>
+class SmallVector : public SmallVectorImpl<T> {
+  /// Inline space for elements which aren't stored in the base class.
+  SmallVectorStorage<T, N> Storage;
+
+public:
+  SmallVector() : SmallVectorImpl<T>(N) {}
+
+  ~SmallVector() {
+    // Destroy the constructed elements in the vector.
+    this->destroy_range(this->begin(), this->end());
+  }
+
+  explicit SmallVector(size_t Size, const T &Value = T())
+    : SmallVectorImpl<T>(N) {
+    this->assign(Size, Value);
+  }
+
+  template <typename ItTy,
+            typename = typename std::enable_if<std::is_convertible<
+                typename std::iterator_traits<ItTy>::iterator_category,
+                std::input_iterator_tag>::value>::type>
+  SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
+    this->append(S, E);
+  }
+
+  template <typename RangeTy>
+  explicit SmallVector(const iterator_range<RangeTy> &R)
+      : SmallVectorImpl<T>(N) {
+    this->append(R.begin(), R.end());
+  }
+
+  SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
+    this->assign(IL);
+  }
+
+  SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
+    if (!RHS.empty())
+      SmallVectorImpl<T>::operator=(RHS);
+  }
+
+  const SmallVector &operator=(const SmallVector &RHS) {
+    SmallVectorImpl<T>::operator=(RHS);
+    return *this;
+  }
+
+  SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
+    if (!RHS.empty())
+      SmallVectorImpl<T>::operator=(::std::move(RHS));
+  }
+
+  SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
+    if (!RHS.empty())
+      SmallVectorImpl<T>::operator=(::std::move(RHS));
+  }
+
+  const SmallVector &operator=(SmallVector &&RHS) {
+    SmallVectorImpl<T>::operator=(::std::move(RHS));
+    return *this;
+  }
+
+  const SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
+    SmallVectorImpl<T>::operator=(::std::move(RHS));
+    return *this;
+  }
+
+  const SmallVector &operator=(std::initializer_list<T> IL) {
+    this->assign(IL);
+    return *this;
+  }
+};
+
+template <typename T, unsigned N>
+inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
+  return X.capacity_in_bytes();
+}
+
+} // end namespace llvm
+
+namespace std {
+
+  /// Implement std::swap in terms of SmallVector swap.
+  template<typename T>
+  inline void
+  swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
+    LHS.swap(RHS);
+  }
+
+  /// Implement std::swap in terms of SmallVector swap.
+  template<typename T, unsigned N>
+  inline void
+  swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
+    LHS.swap(RHS);
+  }
+
+} // end namespace std
+
+#endif // LLVM_ADT_SMALLVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/SparseBitVector.h b/linux-x64/clang/include/llvm/ADT/SparseBitVector.h
new file mode 100644
index 0000000..4cbf40c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SparseBitVector.h
@@ -0,0 +1,888 @@
+//===- llvm/ADT/SparseBitVector.h - Efficient Sparse BitVector --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SparseBitVector class.  See the doxygen comment for
+// SparseBitVector for more details on the algorithm used.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSEBITVECTOR_H
+#define LLVM_ADT_SPARSEBITVECTOR_H
+
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <climits>
+#include <cstring>
+#include <iterator>
+#include <list>
+
+namespace llvm {
+
+/// SparseBitVector is an implementation of a bitvector that is sparse by only
+/// storing the elements that have non-zero bits set.  In order to make this
+/// fast for the most common cases, SparseBitVector is implemented as a linked
+/// list of SparseBitVectorElements.  We maintain a pointer to the last
+/// SparseBitVectorElement accessed (in the form of a list iterator), in order
+/// to make multiple in-order test/set constant time after the first one is
+/// executed.  Note that using vectors to store SparseBitVectorElement's does
+/// not work out very well because it causes insertion in the middle to take
+/// enormous amounts of time with a large amount of bits.  Other structures that
+/// have better worst cases for insertion in the middle (various balanced trees,
+/// etc) do not perform as well in practice as a linked list with this iterator
+/// kept up to date.  They are also significantly more memory intensive.
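+///
+/// A minimal usage sketch (illustrative only):
+/// \code
+///   SparseBitVector<> Live;
+///   Live.set(4);
+///   Live.set(100000);          // Distant bits allocate only two elements.
+///   for (unsigned Bit : Live)  // Visits set bits in increasing order.
+///     process(Bit);            // 'process' is a hypothetical callback.
+/// \endcode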
+
+template <unsigned ElementSize = 128> struct SparseBitVectorElement {
+public:
+  using BitWord = unsigned long;
+  using size_type = unsigned;
+  enum {
+    BITWORD_SIZE = sizeof(BitWord) * CHAR_BIT,
+    BITWORDS_PER_ELEMENT = (ElementSize + BITWORD_SIZE - 1) / BITWORD_SIZE,
+    BITS_PER_ELEMENT = ElementSize
+  };
+
+private:
+  // Index of Element in terms of where first bit starts.
+  unsigned ElementIndex;
+  BitWord Bits[BITWORDS_PER_ELEMENT];
+
+  SparseBitVectorElement() {
+    ElementIndex = ~0U;
+    memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
+  }
+
+public:
+  explicit SparseBitVectorElement(unsigned Idx) {
+    ElementIndex = Idx;
+    memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
+  }
+
+  // Comparison.
+  bool operator==(const SparseBitVectorElement &RHS) const {
+    if (ElementIndex != RHS.ElementIndex)
+      return false;
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+      if (Bits[i] != RHS.Bits[i])
+        return false;
+    return true;
+  }
+
+  bool operator!=(const SparseBitVectorElement &RHS) const {
+    return !(*this == RHS);
+  }
+
+  // Return the bits that make up word Idx in our element.
+  BitWord word(unsigned Idx) const {
+    assert(Idx < BITWORDS_PER_ELEMENT);
+    return Bits[Idx];
+  }
+
+  unsigned index() const {
+    return ElementIndex;
+  }
+
+  bool empty() const {
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+      if (Bits[i])
+        return false;
+    return true;
+  }
+
+  void set(unsigned Idx) {
+    Bits[Idx / BITWORD_SIZE] |= 1UL << (Idx % BITWORD_SIZE);
+  }
+
+  bool test_and_set(unsigned Idx) {
+    bool old = test(Idx);
+    if (!old) {
+      set(Idx);
+      return true;
+    }
+    return false;
+  }
+
+  void reset(unsigned Idx) {
+    Bits[Idx / BITWORD_SIZE] &= ~(1UL << (Idx % BITWORD_SIZE));
+  }
+
+  bool test(unsigned Idx) const {
+    return Bits[Idx / BITWORD_SIZE] & (1UL << (Idx % BITWORD_SIZE));
+  }
+
+  size_type count() const {
+    unsigned NumBits = 0;
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+      NumBits += countPopulation(Bits[i]);
+    return NumBits;
+  }
+
+  /// find_first - Returns the index of the first set bit.
+  int find_first() const {
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+      if (Bits[i] != 0)
+        return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
+    llvm_unreachable("Illegal empty element");
+  }
+
+  /// find_last - Returns the index of the last set bit.
+  int find_last() const {
+    for (unsigned I = 0; I < BITWORDS_PER_ELEMENT; ++I) {
+      unsigned Idx = BITWORDS_PER_ELEMENT - I - 1;
+      if (Bits[Idx] != 0)
+        return Idx * BITWORD_SIZE + BITWORD_SIZE -
+               countLeadingZeros(Bits[Idx]) - 1;
+    }
+    llvm_unreachable("Illegal empty element");
+  }
+
+  /// find_next - Returns the index of the next set bit starting from the
+  /// "Curr" bit. Returns -1 if the next set bit is not found.
+  int find_next(unsigned Curr) const {
+    if (Curr >= BITS_PER_ELEMENT)
+      return -1;
+
+    unsigned WordPos = Curr / BITWORD_SIZE;
+    unsigned BitPos = Curr % BITWORD_SIZE;
+    BitWord Copy = Bits[WordPos];
+    assert(WordPos <= BITWORDS_PER_ELEMENT
+           && "Word Position outside of element");
+
+    // Mask off previous bits.
+    Copy &= ~0UL << BitPos;
+
+    if (Copy != 0)
+      return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
+
+    // Check subsequent words.
+    for (unsigned i = WordPos+1; i < BITWORDS_PER_ELEMENT; ++i)
+      if (Bits[i] != 0)
+        return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
+    return -1;
+  }
+
+  // Union this element with RHS and return true if this one changed.
+  bool unionWith(const SparseBitVectorElement &RHS) {
+    bool changed = false;
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+      BitWord old = changed ? 0 : Bits[i];
+
+      Bits[i] |= RHS.Bits[i];
+      if (!changed && old != Bits[i])
+        changed = true;
+    }
+    return changed;
+  }
+
+  // Return true if we have any bits in common with RHS
+  bool intersects(const SparseBitVectorElement &RHS) const {
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+      if (RHS.Bits[i] & Bits[i])
+        return true;
+    }
+    return false;
+  }
+
+  // Intersect this Element with RHS and return true if this one changed.
+  // BecameZero is set to true if this element became all-zero bits.
+  bool intersectWith(const SparseBitVectorElement &RHS,
+                     bool &BecameZero) {
+    bool changed = false;
+    bool allzero = true;
+
+    BecameZero = false;
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+      BitWord old = changed ? 0 : Bits[i];
+
+      Bits[i] &= RHS.Bits[i];
+      if (Bits[i] != 0)
+        allzero = false;
+
+      if (!changed && old != Bits[i])
+        changed = true;
+    }
+    BecameZero = allzero;
+    return changed;
+  }
+
+  // Intersect this Element with the complement of RHS and return true if this
+  // one changed.  BecameZero is set to true if this element became all-zero
+  // bits.
+  bool intersectWithComplement(const SparseBitVectorElement &RHS,
+                               bool &BecameZero) {
+    bool changed = false;
+    bool allzero = true;
+
+    BecameZero = false;
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+      BitWord old = changed ? 0 : Bits[i];
+
+      Bits[i] &= ~RHS.Bits[i];
+      if (Bits[i] != 0)
+        allzero = false;
+
+      if (!changed && old != Bits[i])
+        changed = true;
+    }
+    BecameZero = allzero;
+    return changed;
+  }
+
+  // Three argument version of intersectWithComplement that intersects
+  // RHS1 & ~RHS2 into this element
+  void intersectWithComplement(const SparseBitVectorElement &RHS1,
+                               const SparseBitVectorElement &RHS2,
+                               bool &BecameZero) {
+    bool allzero = true;
+
+    BecameZero = false;
+    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+      Bits[i] = RHS1.Bits[i] & ~RHS2.Bits[i];
+      if (Bits[i] != 0)
+        allzero = false;
+    }
+    BecameZero = allzero;
+  }
+};
+
+template <unsigned ElementSize = 128>
+class SparseBitVector {
+  using ElementList = std::list<SparseBitVectorElement<ElementSize>>;
+  using ElementListIter = typename ElementList::iterator;
+  using ElementListConstIter = typename ElementList::const_iterator;
+  enum {
+    BITWORD_SIZE = SparseBitVectorElement<ElementSize>::BITWORD_SIZE
+  };
+
+  // Pointer to our current Element.
+  ElementListIter CurrElementIter;
+  ElementList Elements;
+
+  // This is like std::lower_bound, except we do linear searching from the
+  // current position.
+  ElementListIter FindLowerBound(unsigned ElementIndex) {
+    if (Elements.empty()) {
+      CurrElementIter = Elements.begin();
+      return Elements.begin();
+    }
+
+    // Make sure our current iterator is valid.
+    if (CurrElementIter == Elements.end())
+      --CurrElementIter;
+
+    // Search from our current iterator, either backwards or forwards,
+    // depending on what element we are looking for.
+    ElementListIter ElementIter = CurrElementIter;
+    if (CurrElementIter->index() == ElementIndex) {
+      return ElementIter;
+    } else if (CurrElementIter->index() > ElementIndex) {
+      while (ElementIter != Elements.begin()
+             && ElementIter->index() > ElementIndex)
+        --ElementIter;
+    } else {
+      while (ElementIter != Elements.end() &&
+             ElementIter->index() < ElementIndex)
+        ++ElementIter;
+    }
+    CurrElementIter = ElementIter;
+    return ElementIter;
+  }
+
+  // Iterator to walk the set bits in the bitmap.  This iterator is a lot
+  // uglier than it would otherwise be, in order to be efficient.
+  class SparseBitVectorIterator {
+  private:
+    bool AtEnd;
+
+    const SparseBitVector<ElementSize> *BitVector = nullptr;
+
+    // Current element inside of bitmap.
+    ElementListConstIter Iter;
+
+    // Current bit number inside of our bitmap.
+    unsigned BitNumber;
+
+    // Current word number inside of our element.
+    unsigned WordNumber;
+
+    // Current bits from the element.
+    typename SparseBitVectorElement<ElementSize>::BitWord Bits;
+
+    // Move our iterator to the first non-zero bit in the bitmap.
+    void AdvanceToFirstNonZero() {
+      if (AtEnd)
+        return;
+      if (BitVector->Elements.empty()) {
+        AtEnd = true;
+        return;
+      }
+      Iter = BitVector->Elements.begin();
+      BitNumber = Iter->index() * ElementSize;
+      unsigned BitPos = Iter->find_first();
+      BitNumber += BitPos;
+      WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
+      Bits = Iter->word(WordNumber);
+      Bits >>= BitPos % BITWORD_SIZE;
+    }
+
+    // Move our iterator to the next non-zero bit.
+    void AdvanceToNextNonZero() {
+      if (AtEnd)
+        return;
+
+      while (Bits && !(Bits & 1)) {
+        Bits >>= 1;
+        BitNumber += 1;
+      }
+
+      // See if we ran out of Bits in this word.
+      if (!Bits) {
+        int NextSetBitNumber = Iter->find_next(BitNumber % ElementSize);
+        // If we ran out of set bits in this element, move to next element.
+        if (NextSetBitNumber == -1 || (BitNumber % ElementSize == 0)) {
+          ++Iter;
+          WordNumber = 0;
+
+          // We may run out of elements in the bitmap.
+          if (Iter == BitVector->Elements.end()) {
+            AtEnd = true;
+            return;
+          }
+          // Set up for next non-zero word in bitmap.
+          BitNumber = Iter->index() * ElementSize;
+          NextSetBitNumber = Iter->find_first();
+          BitNumber += NextSetBitNumber;
+          WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
+          Bits = Iter->word(WordNumber);
+          Bits >>= NextSetBitNumber % BITWORD_SIZE;
+        } else {
+          WordNumber = (NextSetBitNumber % ElementSize) / BITWORD_SIZE;
+          Bits = Iter->word(WordNumber);
+          Bits >>= NextSetBitNumber % BITWORD_SIZE;
+          BitNumber = Iter->index() * ElementSize;
+          BitNumber += NextSetBitNumber;
+        }
+      }
+    }
+
+  public:
+    SparseBitVectorIterator() = default;
+
+    SparseBitVectorIterator(const SparseBitVector<ElementSize> *RHS,
+                            bool end = false) : BitVector(RHS) {
+      Iter = BitVector->Elements.begin();
+      BitNumber = 0;
+      Bits = 0;
+      WordNumber = ~0;
+      AtEnd = end;
+      AdvanceToFirstNonZero();
+    }
+
+    // Preincrement.
+    inline SparseBitVectorIterator& operator++() {
+      ++BitNumber;
+      Bits >>= 1;
+      AdvanceToNextNonZero();
+      return *this;
+    }
+
+    // Postincrement.
+    inline SparseBitVectorIterator operator++(int) {
+      SparseBitVectorIterator tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    // Return the current set bit number.
+    unsigned operator*() const {
+      return BitNumber;
+    }
+
+    bool operator==(const SparseBitVectorIterator &RHS) const {
+      // If they are both at the end, ignore the rest of the fields.
+      if (AtEnd && RHS.AtEnd)
+        return true;
+      // Otherwise they are the same if they have the same bit number and
+      // bitmap.
+      return AtEnd == RHS.AtEnd && RHS.BitNumber == BitNumber;
+    }
+
+    bool operator!=(const SparseBitVectorIterator &RHS) const {
+      return !(*this == RHS);
+    }
+  };
+
+public:
+  using iterator = SparseBitVectorIterator;
+
+  SparseBitVector() {
+    CurrElementIter = Elements.begin();
+  }
+
+  // SparseBitVector copy ctor.
+  SparseBitVector(const SparseBitVector &RHS) {
+    ElementListConstIter ElementIter = RHS.Elements.begin();
+    while (ElementIter != RHS.Elements.end()) {
+      Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter));
+      ++ElementIter;
+    }
+
+    CurrElementIter = Elements.begin();
+  }
+
+  ~SparseBitVector() = default;
+
+  // Clear.
+  void clear() {
+    Elements.clear();
+  }
+
+  // Assignment
+  SparseBitVector& operator=(const SparseBitVector& RHS) {
+    if (this == &RHS)
+      return *this;
+
+    Elements.clear();
+
+    ElementListConstIter ElementIter = RHS.Elements.begin();
+    while (ElementIter != RHS.Elements.end()) {
+      Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter));
+      ++ElementIter;
+    }
+
+    CurrElementIter = Elements.begin();
+
+    return *this;
+  }
+
+  // Test, Reset, and Set a bit in the bitmap.
+  bool test(unsigned Idx) {
+    if (Elements.empty())
+      return false;
+
+    unsigned ElementIndex = Idx / ElementSize;
+    ElementListIter ElementIter = FindLowerBound(ElementIndex);
+
+    // If we can't find an element that is supposed to contain this bit, there
+    // is nothing more to do.
+    if (ElementIter == Elements.end() ||
+        ElementIter->index() != ElementIndex)
+      return false;
+    return ElementIter->test(Idx % ElementSize);
+  }
+
+  void reset(unsigned Idx) {
+    if (Elements.empty())
+      return;
+
+    unsigned ElementIndex = Idx / ElementSize;
+    ElementListIter ElementIter = FindLowerBound(ElementIndex);
+
+    // If we can't find an element that is supposed to contain this bit, there
+    // is nothing more to do.
+    if (ElementIter == Elements.end() ||
+        ElementIter->index() != ElementIndex)
+      return;
+    ElementIter->reset(Idx % ElementSize);
+
+    // When the element is zeroed out, delete it.
+    if (ElementIter->empty()) {
+      ++CurrElementIter;
+      Elements.erase(ElementIter);
+    }
+  }
+
+  void set(unsigned Idx) {
+    unsigned ElementIndex = Idx / ElementSize;
+    ElementListIter ElementIter;
+    if (Elements.empty()) {
+      ElementIter = Elements.emplace(Elements.end(), ElementIndex);
+    } else {
+      ElementIter = FindLowerBound(ElementIndex);
+
+      if (ElementIter == Elements.end() ||
+          ElementIter->index() != ElementIndex) {
+        // We may have hit the beginning of our SparseBitVector, in which case
+        // we may need to insert right after this element, which requires
+        // moving the current iterator forward one, because insert() inserts
+        // before its iterator argument.
+        if (ElementIter != Elements.end() &&
+            ElementIter->index() < ElementIndex)
+          ++ElementIter;
+        ElementIter = Elements.emplace(ElementIter, ElementIndex);
+      }
+    }
+    CurrElementIter = ElementIter;
+
+    ElementIter->set(Idx % ElementSize);
+  }
+
+  bool test_and_set(unsigned Idx) {
+    bool old = test(Idx);
+    if (!old) {
+      set(Idx);
+      return true;
+    }
+    return false;
+  }
+
+  bool operator!=(const SparseBitVector &RHS) const {
+    return !(*this == RHS);
+  }
+
+  bool operator==(const SparseBitVector &RHS) const {
+    ElementListConstIter Iter1 = Elements.begin();
+    ElementListConstIter Iter2 = RHS.Elements.begin();
+
+    for (; Iter1 != Elements.end() && Iter2 != RHS.Elements.end();
+         ++Iter1, ++Iter2) {
+      if (*Iter1 != *Iter2)
+        return false;
+    }
+    return Iter1 == Elements.end() && Iter2 == RHS.Elements.end();
+  }
+
+  // Union our bitmap with the RHS and return true if we changed.
+  bool operator|=(const SparseBitVector &RHS) {
+    if (this == &RHS)
+      return false;
+
+    bool changed = false;
+    ElementListIter Iter1 = Elements.begin();
+    ElementListConstIter Iter2 = RHS.Elements.begin();
+
+    // If RHS is empty, we are done
+    if (RHS.Elements.empty())
+      return false;
+
+    while (Iter2 != RHS.Elements.end()) {
+      if (Iter1 == Elements.end() || Iter1->index() > Iter2->index()) {
+        Elements.insert(Iter1, *Iter2);
+        ++Iter2;
+        changed = true;
+      } else if (Iter1->index() == Iter2->index()) {
+        changed |= Iter1->unionWith(*Iter2);
+        ++Iter1;
+        ++Iter2;
+      } else {
+        ++Iter1;
+      }
+    }
+    CurrElementIter = Elements.begin();
+    return changed;
+  }
+
+  // Intersect our bitmap with the RHS and return true if ours changed.
+  bool operator&=(const SparseBitVector &RHS) {
+    if (this == &RHS)
+      return false;
+
+    bool changed = false;
+    ElementListIter Iter1 = Elements.begin();
+    ElementListConstIter Iter2 = RHS.Elements.begin();
+
+    // Check if both bitmaps are empty.
+    if (Elements.empty() && RHS.Elements.empty())
+      return false;
+
+    // Loop through, intersecting as we go, erasing elements when necessary.
+    while (Iter2 != RHS.Elements.end()) {
+      if (Iter1 == Elements.end()) {
+        CurrElementIter = Elements.begin();
+        return changed;
+      }
+
+      if (Iter1->index() > Iter2->index()) {
+        ++Iter2;
+      } else if (Iter1->index() == Iter2->index()) {
+        bool BecameZero;
+        changed |= Iter1->intersectWith(*Iter2, BecameZero);
+        if (BecameZero) {
+          ElementListIter IterTmp = Iter1;
+          ++Iter1;
+          Elements.erase(IterTmp);
+        } else {
+          ++Iter1;
+        }
+        ++Iter2;
+      } else {
+        ElementListIter IterTmp = Iter1;
+        ++Iter1;
+        Elements.erase(IterTmp);
+        changed = true;
+      }
+    }
+    if (Iter1 != Elements.end()) {
+      Elements.erase(Iter1, Elements.end());
+      changed = true;
+    }
+    CurrElementIter = Elements.begin();
+    return changed;
+  }
+
+  // Intersect our bitmap with the complement of the RHS and return true
+  // if ours changed.
+  bool intersectWithComplement(const SparseBitVector &RHS) {
+    if (this == &RHS) {
+      if (!empty()) {
+        clear();
+        return true;
+      }
+      return false;
+    }
+
+    bool changed = false;
+    ElementListIter Iter1 = Elements.begin();
+    ElementListConstIter Iter2 = RHS.Elements.begin();
+
+    // If either our bitmap or RHS is empty, we are done
+    if (Elements.empty() || RHS.Elements.empty())
+      return false;
+
+    // Loop through, intersecting as we go, erasing elements when necessary.
+    while (Iter2 != RHS.Elements.end()) {
+      if (Iter1 == Elements.end()) {
+        CurrElementIter = Elements.begin();
+        return changed;
+      }
+
+      if (Iter1->index() > Iter2->index()) {
+        ++Iter2;
+      } else if (Iter1->index() == Iter2->index()) {
+        bool BecameZero;
+        changed |= Iter1->intersectWithComplement(*Iter2, BecameZero);
+        if (BecameZero) {
+          ElementListIter IterTmp = Iter1;
+          ++Iter1;
+          Elements.erase(IterTmp);
+        } else {
+          ++Iter1;
+        }
+        ++Iter2;
+      } else {
+        ++Iter1;
+      }
+    }
+    CurrElementIter = Elements.begin();
+    return changed;
+  }
+
+  bool intersectWithComplement(const SparseBitVector<ElementSize> *RHS) {
+    return intersectWithComplement(*RHS);
+  }
+
+  //  Three argument version of intersectWithComplement.
+  //  Result of RHS1 & ~RHS2 is stored into this bitmap.
+  void intersectWithComplement(const SparseBitVector<ElementSize> &RHS1,
+                               const SparseBitVector<ElementSize> &RHS2)
+  {
+    if (this == &RHS1) {
+      intersectWithComplement(RHS2);
+      return;
+    } else if (this == &RHS2) {
+      SparseBitVector RHS2Copy(RHS2);
+      intersectWithComplement(RHS1, RHS2Copy);
+      return;
+    }
+
+    Elements.clear();
+    CurrElementIter = Elements.begin();
+    ElementListConstIter Iter1 = RHS1.Elements.begin();
+    ElementListConstIter Iter2 = RHS2.Elements.begin();
+
+    // If RHS1 is empty, we are done
+    // If RHS2 is empty, we still have to copy RHS1
+    if (RHS1.Elements.empty())
+      return;
+
+    // Loop through, intersecting as we go, erasing elements when necessary.
+    while (Iter2 != RHS2.Elements.end()) {
+      if (Iter1 == RHS1.Elements.end())
+        return;
+
+      if (Iter1->index() > Iter2->index()) {
+        ++Iter2;
+      } else if (Iter1->index() == Iter2->index()) {
+        bool BecameZero = false;
+        Elements.emplace_back(Iter1->index());
+        Elements.back().intersectWithComplement(*Iter1, *Iter2, BecameZero);
+        if (BecameZero)
+          Elements.pop_back();
+        ++Iter1;
+        ++Iter2;
+      } else {
+        Elements.push_back(*Iter1++);
+      }
+    }
+
+    // Copy the remaining elements.
+    std::copy(Iter1, RHS1.Elements.end(), std::back_inserter(Elements));
+  }
+
+  void intersectWithComplement(const SparseBitVector<ElementSize> *RHS1,
+                               const SparseBitVector<ElementSize> *RHS2) {
+    intersectWithComplement(*RHS1, *RHS2);
+  }
+
+  bool intersects(const SparseBitVector<ElementSize> *RHS) const {
+    return intersects(*RHS);
+  }
+
+  // Return true if we share any bits in common with RHS
+  bool intersects(const SparseBitVector<ElementSize> &RHS) const {
+    ElementListConstIter Iter1 = Elements.begin();
+    ElementListConstIter Iter2 = RHS.Elements.begin();
+
+    // Check if both bitmaps are empty.
+    if (Elements.empty() && RHS.Elements.empty())
+      return false;
+
+    // Loop through, intersecting as we go, stopping when we hit bits in
+    // common.
+    while (Iter2 != RHS.Elements.end()) {
+      if (Iter1 == Elements.end())
+        return false;
+
+      if (Iter1->index() > Iter2->index()) {
+        ++Iter2;
+      } else if (Iter1->index() == Iter2->index()) {
+        if (Iter1->intersects(*Iter2))
+          return true;
+        ++Iter1;
+        ++Iter2;
+      } else {
+        ++Iter1;
+      }
+    }
+    return false;
+  }
+
+  // Return true iff all bits set in RHS are also set in this
+  // SparseBitVector.
+  bool contains(const SparseBitVector<ElementSize> &RHS) const {
+    SparseBitVector<ElementSize> Result(*this);
+    Result &= RHS;
+    return (Result == RHS);
+  }
+
+  // Return the first set bit in the bitmap.  Return -1 if no bits are set.
+  int find_first() const {
+    if (Elements.empty())
+      return -1;
+    const SparseBitVectorElement<ElementSize> &First = *(Elements.begin());
+    return (First.index() * ElementSize) + First.find_first();
+  }
+
+  // Return the last set bit in the bitmap.  Return -1 if no bits are set.
+  int find_last() const {
+    if (Elements.empty())
+      return -1;
+    const SparseBitVectorElement<ElementSize> &Last = *(Elements.rbegin());
+    return (Last.index() * ElementSize) + Last.find_last();
+  }
+
+  // Return true if the SparseBitVector is empty
+  bool empty() const {
+    return Elements.empty();
+  }
+
+  unsigned count() const {
+    unsigned BitCount = 0;
+    for (ElementListConstIter Iter = Elements.begin();
+         Iter != Elements.end();
+         ++Iter)
+      BitCount += Iter->count();
+
+    return BitCount;
+  }
+
+  iterator begin() const {
+    return iterator(this);
+  }
+
+  iterator end() const {
+    return iterator(this, true);
+  }
+};
+
+// Convenience functions to allow Or and And without dereferencing in the user
+// code.
+
+template <unsigned ElementSize>
+inline bool operator |=(SparseBitVector<ElementSize> &LHS,
+                        const SparseBitVector<ElementSize> *RHS) {
+  return LHS |= *RHS;
+}
+
+template <unsigned ElementSize>
+inline bool operator |=(SparseBitVector<ElementSize> *LHS,
+                        const SparseBitVector<ElementSize> &RHS) {
+  return LHS->operator|=(RHS);
+}
+
+template <unsigned ElementSize>
+inline bool operator &=(SparseBitVector<ElementSize> *LHS,
+                        const SparseBitVector<ElementSize> &RHS) {
+  return LHS->operator&=(RHS);
+}
+
+template <unsigned ElementSize>
+inline bool operator &=(SparseBitVector<ElementSize> &LHS,
+                        const SparseBitVector<ElementSize> *RHS) {
+  return LHS &= *RHS;
+}
+
+// Convenience functions for infix union, intersection, difference operators.
+
+template <unsigned ElementSize>
+inline SparseBitVector<ElementSize>
+operator|(const SparseBitVector<ElementSize> &LHS,
+          const SparseBitVector<ElementSize> &RHS) {
+  SparseBitVector<ElementSize> Result(LHS);
+  Result |= RHS;
+  return Result;
+}
+
+template <unsigned ElementSize>
+inline SparseBitVector<ElementSize>
+operator&(const SparseBitVector<ElementSize> &LHS,
+          const SparseBitVector<ElementSize> &RHS) {
+  SparseBitVector<ElementSize> Result(LHS);
+  Result &= RHS;
+  return Result;
+}
+
+template <unsigned ElementSize>
+inline SparseBitVector<ElementSize>
+operator-(const SparseBitVector<ElementSize> &LHS,
+          const SparseBitVector<ElementSize> &RHS) {
+  SparseBitVector<ElementSize> Result;
+  Result.intersectWithComplement(LHS, RHS);
+  return Result;
+}
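+
+// For example (a minimal sketch, illustrative only):
+//
+//   SparseBitVector<> A, B;
+//   A.set(1); A.set(2); B.set(2);
+//   SparseBitVector<> U = A | B;  // union:        {1, 2}
+//   SparseBitVector<> I = A & B;  // intersection: {2}
+//   SparseBitVector<> D = A - B;  // difference:   {1}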
+
+// Dump a SparseBitVector to a stream
+template <unsigned ElementSize>
+void dump(const SparseBitVector<ElementSize> &LHS, raw_ostream &out) {
+  out << "[";
+
+  typename SparseBitVector<ElementSize>::iterator bi = LHS.begin(),
+    be = LHS.end();
+  if (bi != be) {
+    out << *bi;
+    for (++bi; bi != be; ++bi) {
+      out << " " << *bi;
+    }
+  }
+  out << "]\n";
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SPARSEBITVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/SparseMultiSet.h b/linux-x64/clang/include/llvm/ADT/SparseMultiSet.h
new file mode 100644
index 0000000..3c86376
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SparseMultiSet.h
@@ -0,0 +1,523 @@
+//===- llvm/ADT/SparseMultiSet.h - Sparse multiset --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SparseMultiSet class, which adds multiset behavior to
+// the SparseSet.
+//
+// A sparse multiset holds a small number of objects identified by integer keys
+// from a moderately sized universe. The sparse multiset uses more memory than
+// other containers in order to provide faster operations. Any key can map to
+// multiple values. A SparseMultiSetNode class is provided, which serves as a
+// convenient base class for the contents of a SparseMultiSet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSEMULTISET_H
+#define LLVM_ADT_SPARSEMULTISET_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseSet.h"
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <iterator>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+/// Fast multiset implementation for objects that can be identified by small
+/// unsigned keys.
+///
+/// SparseMultiSet allocates memory proportional to the size of the key
+/// universe, so it is not recommended for building composite data structures.
+/// It is useful for algorithms that require a single set with fast operations.
+///
+/// Compared to DenseSet and DenseMap, SparseMultiSet provides constant-time
+/// clear() and iteration as fast as a vector.  The find(), insert(), and
+/// erase() operations are all constant time, and typically faster than a
+/// hash table.
+/// The iteration order doesn't depend on numerical key values; it only depends
+/// on the order of insert() and erase() operations.  Iteration order is the
+/// insertion order. Iteration is only provided over elements of equivalent
+/// keys, but iterators are bidirectional.
+///
+/// Compared to BitVector, SparseMultiSet<unsigned> uses 8x-40x more memory, but
+/// offers constant-time clear() and size() operations as well as fast iteration
+/// independent of the size of the universe.
+///
+/// SparseMultiSet contains a dense vector holding all the objects and a sparse
+/// array holding indexes into the dense vector.  Most of the memory is used by
+/// the sparse array which is the size of the key universe. The SparseT template
+/// parameter provides a space/speed tradeoff for sets holding many elements.
+///
+/// When SparseT is uint32_t, find() only touches up to 3 cache lines, but the
+/// sparse array uses 4 x Universe bytes.
+///
+/// When SparseT is uint8_t (the default), find() touches up to 3+[N/256] cache
+/// lines, but the sparse array is 4x smaller.  N is the number of elements in
+/// the set.
+///
+/// For sets that may grow to thousands of elements, SparseT should be set to
+/// uint16_t or uint32_t.
+///
+/// Multiset behavior is achieved with doubly linked lists of values that are
+/// inlined in the dense vector. SparseMultiSet is a good choice when
+/// one desires a growable number of entries per key, as it will retain the
+/// SparseSet algorithmic properties despite being growable. Thus, it is often a
+/// better choice than a SparseSet of growable containers or a vector of
+/// vectors. SparseMultiSet also keeps iterators valid after erasure (provided
+/// the iterators don't point to the element erased), allowing for more
+/// intuitive and fast removal.
+///
+/// @tparam ValueT      The type of objects in the set.
+/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
+/// @tparam SparseT     An unsigned integer type. See above.
+///
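+/// A minimal usage sketch (illustrative only; the universe size and keys are
+/// made up):
+/// \code
+///   SparseMultiSet<unsigned> Set;
+///   Set.setUniverse(64);  // All keys must be < 64.
+///   Set.insert(5);
+///   Set.insert(5);        // The same key may be inserted repeatedly.
+///   for (auto I = Set.find(5); I != Set.end(); ++I)
+///     use(*I);            // 'use' is a hypothetical consumer.
+/// \endcode
+///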
+template<typename ValueT,
+         typename KeyFunctorT = identity<unsigned>,
+         typename SparseT = uint8_t>
+class SparseMultiSet {
+  static_assert(std::numeric_limits<SparseT>::is_integer &&
+                !std::numeric_limits<SparseT>::is_signed,
+                "SparseT must be an unsigned integer type");
+
+  /// The actual data that's stored, as a doubly-linked list implemented via
+  /// indices into the dense vector.  The list is circular in its Prev indices
+  /// and INVALID-terminated in its Next indices, which provides efficient
+  /// access to list tails.  These nodes can also be tombstones, in which case
+  /// they are actually nodes in a singly-linked freelist of recyclable slots.
+  struct SMSNode {
+    static const unsigned INVALID = ~0U;
+
+    ValueT Data;
+    unsigned Prev;
+    unsigned Next;
+
+    SMSNode(ValueT D, unsigned P, unsigned N) : Data(D), Prev(P), Next(N) {}
+
+    /// List tails have invalid Nexts.
+    bool isTail() const {
+      return Next == INVALID;
+    }
+
+    /// Whether this node is a tombstone node, and thus is in our freelist.
+    bool isTombstone() const {
+      return Prev == INVALID;
+    }
+
+    /// Since the list is circular in Prev, all non-tombstone nodes have a valid
+    /// Prev.
+    bool isValid() const { return Prev != INVALID; }
+  };
+
+  using KeyT = typename KeyFunctorT::argument_type;
+  using DenseT = SmallVector<SMSNode, 8>;
+  DenseT Dense;
+  SparseT *Sparse = nullptr;
+  unsigned Universe = 0;
+  KeyFunctorT KeyIndexOf;
+  SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
+
+  /// We have a built-in recycler for reusing tombstone slots. This recycler
+  /// puts a singly-linked free list into tombstone slots, allowing quick
+  /// erasure, iterator preservation, and dense size.
+  unsigned FreelistIdx = SMSNode::INVALID;
+  unsigned NumFree = 0;
+
+  unsigned sparseIndex(const ValueT &Val) const {
+    assert(ValIndexOf(Val) < Universe &&
+           "Invalid key in set. Did object mutate?");
+    return ValIndexOf(Val);
+  }
+  unsigned sparseIndex(const SMSNode &N) const { return sparseIndex(N.Data); }
+
+  /// Whether the given entry is the head of the list. A list head's previous
+  /// pointer points to the tail of the list, allowing for efficient access to
+  /// the list tail. D must be a valid entry node.
+  bool isHead(const SMSNode &D) const {
+    assert(D.isValid() && "Invalid node for head");
+    return Dense[D.Prev].isTail();
+  }
+
+  /// Whether the given entry is a singleton entry, i.e. the only entry with
+  /// that key.
+  bool isSingleton(const SMSNode &N) const {
+    assert(N.isValid() && "Invalid node for singleton");
+    // Is N its own predecessor?
+    return &Dense[N.Prev] == &N;
+  }
+
+  /// Add in the given SMSNode. Uses a free entry in our freelist if
+  /// available. Returns the index of the added node.
+  unsigned addValue(const ValueT& V, unsigned Prev, unsigned Next) {
+    if (NumFree == 0) {
+      Dense.push_back(SMSNode(V, Prev, Next));
+      return Dense.size() - 1;
+    }
+
+    // Peel off a free slot
+    unsigned Idx = FreelistIdx;
+    unsigned NextFree = Dense[Idx].Next;
+    assert(Dense[Idx].isTombstone() && "Non-tombstone free?");
+
+    Dense[Idx] = SMSNode(V, Prev, Next);
+    FreelistIdx = NextFree;
+    --NumFree;
+    return Idx;
+  }
+
+  /// Make the current index a new tombstone. Pushes it onto the freelist.
+  void makeTombstone(unsigned Idx) {
+    Dense[Idx].Prev = SMSNode::INVALID;
+    Dense[Idx].Next = FreelistIdx;
+    FreelistIdx = Idx;
+    ++NumFree;
+  }
+
+public:
+  using value_type = ValueT;
+  using reference = ValueT &;
+  using const_reference = const ValueT &;
+  using pointer = ValueT *;
+  using const_pointer = const ValueT *;
+  using size_type = unsigned;
+
+  SparseMultiSet() = default;
+  SparseMultiSet(const SparseMultiSet &) = delete;
+  SparseMultiSet &operator=(const SparseMultiSet &) = delete;
+  ~SparseMultiSet() { free(Sparse); }
+
+  /// Set the universe size which determines the largest key the set can hold.
+  /// The universe must be sized before any elements can be added.
+  ///
+  /// @param U Universe size. All object keys must be less than U.
+  ///
+  void setUniverse(unsigned U) {
+    // It's not hard to resize the universe on a non-empty set, but it doesn't
+    // seem like a likely use case, so we can add that code when we need it.
+    assert(empty() && "Can only resize universe on an empty map");
+    // Hysteresis prevents needless reallocations.
+    if (U >= Universe/4 && U <= Universe)
+      return;
+    free(Sparse);
+    // The Sparse array doesn't actually need to be initialized, so malloc
+    // would be enough here, but that will cause tools like valgrind to
+    // complain about branching on uninitialized data.
+    Sparse = static_cast<SparseT*>(safe_calloc(U, sizeof(SparseT)));
+    Universe = U;
+  }
+
+  /// Our iterators are iterators over the collection of objects that share a
+  /// key.
+  template<typename SMSPtrTy>
+  class iterator_base : public std::iterator<std::bidirectional_iterator_tag,
+                                             ValueT> {
+    friend class SparseMultiSet;
+
+    SMSPtrTy SMS;
+    unsigned Idx;
+    unsigned SparseIdx;
+
+    iterator_base(SMSPtrTy P, unsigned I, unsigned SI)
+      : SMS(P), Idx(I), SparseIdx(SI) {}
+
+    /// Whether our iterator has fallen outside our dense vector.
+    bool isEnd() const {
+      if (Idx == SMSNode::INVALID)
+        return true;
+
+      assert(Idx < SMS->Dense.size() && "Out of range, non-INVALID Idx?");
+      return false;
+    }
+
+    /// Whether our iterator is properly keyed, i.e. the SparseIdx is valid.
+    bool isKeyed() const { return SparseIdx < SMS->Universe; }
+
+    unsigned Prev() const { return SMS->Dense[Idx].Prev; }
+    unsigned Next() const { return SMS->Dense[Idx].Next; }
+
+    void setPrev(unsigned P) { SMS->Dense[Idx].Prev = P; }
+    void setNext(unsigned N) { SMS->Dense[Idx].Next = N; }
+
+  public:
+    using super = std::iterator<std::bidirectional_iterator_tag, ValueT>;
+    using value_type = typename super::value_type;
+    using difference_type = typename super::difference_type;
+    using pointer = typename super::pointer;
+    using reference = typename super::reference;
+
+    reference operator*() const {
+      assert(isKeyed() && SMS->sparseIndex(SMS->Dense[Idx].Data) == SparseIdx &&
+             "Dereferencing iterator of invalid key or index");
+
+      return SMS->Dense[Idx].Data;
+    }
+    pointer operator->() const { return &operator*(); }
+
+    /// Comparison operators
+    bool operator==(const iterator_base &RHS) const {
+      // end compares equal
+      if (SMS == RHS.SMS && Idx == RHS.Idx) {
+        assert((isEnd() || SparseIdx == RHS.SparseIdx) &&
+               "Same dense entry, but different keys?");
+        return true;
+      }
+
+      return false;
+    }
+
+    bool operator!=(const iterator_base &RHS) const {
+      return !operator==(RHS);
+    }
+
+    /// Increment and decrement operators
+    iterator_base &operator--() { // predecrement - Back up
+      assert(isKeyed() && "Decrementing an invalid iterator");
+      assert((isEnd() || !SMS->isHead(SMS->Dense[Idx])) &&
+             "Decrementing head of list");
+
+      // If we're at the end, then issue a new find()
+      if (isEnd())
+        Idx = SMS->findIndex(SparseIdx).Prev();
+      else
+        Idx = Prev();
+
+      return *this;
+    }
+    iterator_base &operator++() { // preincrement - Advance
+      assert(!isEnd() && isKeyed() && "Incrementing an invalid/end iterator");
+      Idx = Next();
+      return *this;
+    }
+    iterator_base operator--(int) { // postdecrement
+      iterator_base I(*this);
+      --*this;
+      return I;
+    }
+    iterator_base operator++(int) { // postincrement
+      iterator_base I(*this);
+      ++*this;
+      return I;
+    }
+  };
+
+  using iterator = iterator_base<SparseMultiSet *>;
+  using const_iterator = iterator_base<const SparseMultiSet *>;
+
+  // Convenience types
+  using RangePair = std::pair<iterator, iterator>;
+
+  /// Returns an iterator past this container. Note that such an iterator cannot
+  /// be decremented, but will compare equal to other end iterators.
+  iterator end() { return iterator(this, SMSNode::INVALID, SMSNode::INVALID); }
+  const_iterator end() const {
+    return const_iterator(this, SMSNode::INVALID, SMSNode::INVALID);
+  }
+
+  /// Returns true if the set is empty.
+  ///
+  /// This is not the same as BitVector::empty().
+  ///
+  bool empty() const { return size() == 0; }
+
+  /// Returns the number of elements in the set.
+  ///
+  /// This is not the same as BitVector::size() which returns the size of the
+  /// universe.
+  ///
+  size_type size() const {
+    assert(NumFree <= Dense.size() && "Out-of-bounds free entries");
+    return Dense.size() - NumFree;
+  }
+
+  /// Clears the set.  This is a very fast constant time operation.
+  ///
+  void clear() {
+    // Sparse does not need to be cleared, see find().
+    Dense.clear();
+    NumFree = 0;
+    FreelistIdx = SMSNode::INVALID;
+  }
+
+  /// Find an element by its index.
+  ///
+  /// @param   Idx A valid index to find.
+  /// @returns An iterator to the element identified by key, or end().
+  ///
+  iterator findIndex(unsigned Idx) {
+    assert(Idx < Universe && "Key out of range");
+    const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
+    for (unsigned i = Sparse[Idx], e = Dense.size(); i < e; i += Stride) {
+      const unsigned FoundIdx = sparseIndex(Dense[i]);
+      // Check that we're pointing at the correct entry and that it is the head
+      // of a valid list.
+      if (Idx == FoundIdx && Dense[i].isValid() && isHead(Dense[i]))
+        return iterator(this, i, Idx);
+      // Stride is 0 when SparseT >= unsigned.  We don't need to loop.
+      if (!Stride)
+        break;
+    }
+    return end();
+  }
+
+  /// Find an element by its key.
+  ///
+  /// @param   Key A valid key to find.
+  /// @returns An iterator to the element identified by key, or end().
+  ///
+  iterator find(const KeyT &Key) {
+    return findIndex(KeyIndexOf(Key));
+  }
+
+  const_iterator find(const KeyT &Key) const {
+    iterator I = const_cast<SparseMultiSet*>(this)->findIndex(KeyIndexOf(Key));
+    return const_iterator(I.SMS, I.Idx, KeyIndexOf(Key));
+  }
+
+  /// Returns the number of elements identified by Key. This is linear in the
+  /// number of elements sharing that key.
+  size_type count(const KeyT &Key) const {
+    unsigned Ret = 0;
+    for (const_iterator It = find(Key); It != end(); ++It)
+      ++Ret;
+
+    return Ret;
+  }
+
+  /// Returns true if this set contains an element identified by Key.
+  bool contains(const KeyT &Key) const {
+    return find(Key) != end();
+  }
+
+  /// Return an iterator to the head or tail of the list of elements sharing
+  /// Key, or end() if no elements have that key.
+  iterator getHead(const KeyT &Key) { return find(Key); }
+  iterator getTail(const KeyT &Key) {
+    iterator I = find(Key);
+    if (I != end())
+      I = iterator(this, I.Prev(), KeyIndexOf(Key));
+    return I;
+  }
+
+  /// The bounds of the range of items sharing Key K. First member is the head
+  /// of the list, and the second member is a decrementable end iterator for
+  /// that key.
+  RangePair equal_range(const KeyT &K) {
+    iterator B = find(K);
+    iterator E = iterator(this, SMSNode::INVALID, B.SparseIdx);
+    return make_pair(B, E);
+  }
+
+  /// Insert a new element at the tail of the subset list. Returns an iterator
+  /// to the newly added entry.
+  iterator insert(const ValueT &Val) {
+    unsigned Idx = sparseIndex(Val);
+    iterator I = findIndex(Idx);
+
+    unsigned NodeIdx = addValue(Val, SMSNode::INVALID, SMSNode::INVALID);
+
+    if (I == end()) {
+      // Make a singleton list
+      Sparse[Idx] = NodeIdx;
+      Dense[NodeIdx].Prev = NodeIdx;
+      return iterator(this, NodeIdx, Idx);
+    }
+
+    // Stick it at the end.
+    unsigned HeadIdx = I.Idx;
+    unsigned TailIdx = I.Prev();
+    Dense[TailIdx].Next = NodeIdx;
+    Dense[HeadIdx].Prev = NodeIdx;
+    Dense[NodeIdx].Prev = TailIdx;
+
+    return iterator(this, NodeIdx, Idx);
+  }
+
+  /// Erases an existing element identified by a valid iterator.
+  ///
+  /// This invalidates iterators pointing at the same entry, but erase() returns
+  /// an iterator pointing to the next element in the subset's list. This makes
+  /// it possible to erase selected elements while iterating over the subset:
+  ///
+  ///   tie(I, E) = Set.equal_range(Key);
+  ///   while (I != E)
+  ///     if (test(*I))
+  ///       I = Set.erase(I);
+  ///     else
+  ///       ++I;
+  ///
+  /// Note that if the last element in the subset list is erased, this will
+  /// return an end iterator which can be decremented to get the new tail (if it
+  /// exists):
+  ///
+  ///  tie(B, I) = Set.equal_range(Key);
+  ///  for (bool isBegin = B == I; !isBegin; /* empty */) {
+  ///    isBegin = (--I) == B;
+  ///    if (test(*I))
+  ///      break;
+  ///    I = erase(I);
+  ///  }
+  iterator erase(iterator I) {
+    assert(I.isKeyed() && !I.isEnd() && !Dense[I.Idx].isTombstone() &&
+           "erasing invalid/end/tombstone iterator");
+
+    // First, unlink the node from its list. Then swap the node out with the
+    // dense vector's last entry
+    iterator NextI = unlink(Dense[I.Idx]);
+
+    // Put in a tombstone.
+    makeTombstone(I.Idx);
+
+    return NextI;
+  }
+
+  /// Erase all elements with the given key. This invalidates all
+  /// iterators of that key.
+  void eraseAll(const KeyT &K) {
+    for (iterator I = find(K); I != end(); /* empty */)
+      I = erase(I);
+  }
+
+private:
+  /// Unlink the node from its list. Returns the next node in the list.
+  iterator unlink(const SMSNode &N) {
+    if (isSingleton(N)) {
+      // Singleton is already unlinked
+      assert(N.Next == SMSNode::INVALID && "Singleton has next?");
+      return iterator(this, SMSNode::INVALID, ValIndexOf(N.Data));
+    }
+
+    if (isHead(N)) {
+      // If we're the head, then update the sparse array and our next.
+      Sparse[sparseIndex(N)] = N.Next;
+      Dense[N.Next].Prev = N.Prev;
+      return iterator(this, N.Next, ValIndexOf(N.Data));
+    }
+
+    if (N.isTail()) {
+      // If we're the tail, then update our head and our previous.
+      findIndex(sparseIndex(N)).setPrev(N.Prev);
+      Dense[N.Prev].Next = N.Next;
+
+      // Give back an end iterator that can be decremented
+      iterator I(this, N.Prev, ValIndexOf(N.Data));
+      return ++I;
+    }
+
+    // Otherwise, just drop us
+    Dense[N.Next].Prev = N.Prev;
+    Dense[N.Prev].Next = N.Next;
+    return iterator(this, N.Next, ValIndexOf(N.Data));
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SPARSEMULTISET_H
diff --git a/linux-x64/clang/include/llvm/ADT/SparseSet.h b/linux-x64/clang/include/llvm/ADT/SparseSet.h
new file mode 100644
index 0000000..74cc6da
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/SparseSet.h
@@ -0,0 +1,316 @@
+//===- llvm/ADT/SparseSet.h - Sparse set ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SparseSet class derived from the version described in
+// Briggs, Torczon, "An efficient representation for sparse sets", ACM Letters
+// on Programming Languages and Systems, Volume 2 Issue 1-4, March-Dec.  1993.
+//
+// A sparse set holds a small number of objects identified by integer keys from
+// a moderately sized universe. The sparse set uses more memory than other
+// containers in order to provide faster operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSESET_H
+#define LLVM_ADT_SPARSESET_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+/// SparseSetValTraits - Objects in a SparseSet are identified by keys that can
+/// be uniquely converted to a small integer less than the set's universe. This
+/// class allows the set to hold values that differ from the set's key type as
+/// long as an index can still be derived from the value. SparseSet never
+/// directly compares ValueT, only their indices, so it can map keys to
+/// arbitrary values. SparseSetValTraits computes the index from the value
+/// object. To compute the index from a key, SparseSet uses a separate
+/// KeyFunctorT template argument.
+///
+/// A simple type declaration, SparseSet<Type>, handles these cases:
+/// - unsigned key, identity index, identity value
+/// - unsigned key, identity index, fat value providing getSparseSetIndex()
+///
+/// The type declaration SparseSet<Type, UnaryFunction> handles:
+/// - unsigned key, remapped index, identity value (virtual registers)
+/// - pointer key, pointer-derived index, identity value (node+ID)
+/// - pointer key, pointer-derived index, fat value with getSparseSetIndex()
+///
+/// Only other, unexpected cases require specializing SparseSetValTraits.
+///
+/// For best results, ValueT should not require a destructor.
+///
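+/// For illustration, a fat value type can expose its own index (a sketch; the
+/// struct and field names are hypothetical):
+///
+///   struct Node {
+///     unsigned ID;                          // index into the key universe
+///     unsigned getSparseSetIndex() const { return ID; }
+///   };
+///
+/// A SparseSet<Node> then derives each element's index from its ID field.
+///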
+template<typename ValueT>
+struct SparseSetValTraits {
+  static unsigned getValIndex(const ValueT &Val) {
+    return Val.getSparseSetIndex();
+  }
+};
+
+/// SparseSetValFunctor - Helper class for selecting SparseSetValTraits. The
+/// generic implementation handles ValueT classes which either provide
+/// getSparseSetIndex() or specialize SparseSetValTraits<>.
+///
+template<typename KeyT, typename ValueT, typename KeyFunctorT>
+struct SparseSetValFunctor {
+  unsigned operator()(const ValueT &Val) const {
+    return SparseSetValTraits<ValueT>::getValIndex(Val);
+  }
+};
+
+/// SparseSetValFunctor<KeyT, KeyT> - Helper class for the common case of
+/// identity key/value sets.
+template<typename KeyT, typename KeyFunctorT>
+struct SparseSetValFunctor<KeyT, KeyT, KeyFunctorT> {
+  unsigned operator()(const KeyT &Key) const {
+    return KeyFunctorT()(Key);
+  }
+};
+
+/// SparseSet - Fast set implementation for objects that can be identified by
+/// small unsigned keys.
+///
+/// SparseSet allocates memory proportional to the size of the key universe, so
+/// it is not recommended for building composite data structures.  It is useful
+/// for algorithms that require a single set with fast operations.
+///
+/// Compared to DenseSet and DenseMap, SparseSet provides constant-time fast
+/// clear() and iteration as fast as a vector.  The find(), insert(), and
+/// erase() operations are all constant time, and typically faster than a hash
+/// table.  The iteration order doesn't depend on numerical key values, it only
+/// depends on the order of insert() and erase() operations.  When no elements
+/// have been erased, the iteration order is the insertion order.
+///
+/// Compared to BitVector, SparseSet<unsigned> uses 8x-40x more memory, but
+/// offers constant-time clear() and size() operations as well as fast
+/// iteration independent of the size of the universe.
+///
+/// SparseSet contains a dense vector holding all the objects and a sparse
+/// array holding indexes into the dense vector.  Most of the memory is used by
+/// the sparse array which is the size of the key universe.  The SparseT
+/// template parameter provides a space/speed tradeoff for sets holding many
+/// elements.
+///
+/// When SparseT is uint32_t, find() only touches 2 cache lines, but the sparse
+/// array uses 4 x Universe bytes.
+///
+/// When SparseT is uint8_t (the default), find() touches up to 2+[N/256] cache
+/// lines, but the sparse array is 4x smaller.  N is the number of elements in
+/// the set.
+///
+/// For sets that may grow to thousands of elements, SparseT should be set to
+/// uint16_t or uint32_t.
+///
+/// @tparam ValueT      The type of objects in the set.
+/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
+/// @tparam SparseT     An unsigned integer type. See above.
+///
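+/// A minimal usage sketch (illustrative; the universe size and key are
+/// arbitrary):
+///
+///   SparseSet<unsigned> Set;
+///   Set.setUniverse(256);          // all keys must be < 256
+///   Set.insert(42);
+///   if (Set.count(42))
+///     Set.erase(42);
+///   Set.clear();                   // constant time, regardless of size
+///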
+template<typename ValueT,
+         typename KeyFunctorT = identity<unsigned>,
+         typename SparseT = uint8_t>
+class SparseSet {
+  static_assert(std::numeric_limits<SparseT>::is_integer &&
+                !std::numeric_limits<SparseT>::is_signed,
+                "SparseT must be an unsigned integer type");
+
+  using KeyT = typename KeyFunctorT::argument_type;
+  using DenseT = SmallVector<ValueT, 8>;
+  using size_type = unsigned;
+  DenseT Dense;
+  SparseT *Sparse = nullptr;
+  unsigned Universe = 0;
+  KeyFunctorT KeyIndexOf;
+  SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
+
+public:
+  using value_type = ValueT;
+  using reference = ValueT &;
+  using const_reference = const ValueT &;
+  using pointer = ValueT *;
+  using const_pointer = const ValueT *;
+
+  SparseSet() = default;
+  SparseSet(const SparseSet &) = delete;
+  SparseSet &operator=(const SparseSet &) = delete;
+  ~SparseSet() { free(Sparse); }
+
+  /// setUniverse - Set the universe size which determines the largest key the
+  /// set can hold.  The universe must be sized before any elements can be
+  /// added.
+  ///
+  /// @param U Universe size. All object keys must be less than U.
+  ///
+  void setUniverse(unsigned U) {
+    // It's not hard to resize the universe on a non-empty set, but it doesn't
+    // seem like a likely use case, so we can add that code when we need it.
+    assert(empty() && "Can only resize universe on an empty map");
+    // Hysteresis prevents needless reallocations.
+    if (U >= Universe/4 && U <= Universe)
+      return;
+    free(Sparse);
+    // The Sparse array doesn't actually need to be initialized, so malloc
+    // would be enough here, but that will cause tools like valgrind to
+    // complain about branching on uninitialized data.
+    Sparse = static_cast<SparseT*>(safe_calloc(U, sizeof(SparseT)));
+    Universe = U;
+  }
+
+  // Import trivial vector stuff from DenseT.
+  using iterator = typename DenseT::iterator;
+  using const_iterator = typename DenseT::const_iterator;
+
+  const_iterator begin() const { return Dense.begin(); }
+  const_iterator end() const { return Dense.end(); }
+  iterator begin() { return Dense.begin(); }
+  iterator end() { return Dense.end(); }
+
+  /// empty - Returns true if the set is empty.
+  ///
+  /// This is not the same as BitVector::empty().
+  ///
+  bool empty() const { return Dense.empty(); }
+
+  /// size - Returns the number of elements in the set.
+  ///
+  /// This is not the same as BitVector::size() which returns the size of the
+  /// universe.
+  ///
+  size_type size() const { return Dense.size(); }
+
+  /// clear - Clears the set.  This is a very fast constant time operation.
+  ///
+  void clear() {
+    // Sparse does not need to be cleared, see find().
+    Dense.clear();
+  }
+
+  /// findIndex - Find an element by its index.
+  ///
+  /// @param   Idx A valid index to find.
+  /// @returns An iterator to the element identified by key, or end().
+  ///
+  iterator findIndex(unsigned Idx) {
+    assert(Idx < Universe && "Key out of range");
+    const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
+    for (unsigned i = Sparse[Idx], e = size(); i < e; i += Stride) {
+      const unsigned FoundIdx = ValIndexOf(Dense[i]);
+      assert(FoundIdx < Universe && "Invalid key in set. Did object mutate?");
+      if (Idx == FoundIdx)
+        return begin() + i;
+      // Stride is 0 when SparseT >= unsigned.  We don't need to loop.
+      if (!Stride)
+        break;
+    }
+    return end();
+  }
+
+  /// find - Find an element by its key.
+  ///
+  /// @param   Key A valid key to find.
+  /// @returns An iterator to the element identified by key, or end().
+  ///
+  iterator find(const KeyT &Key) {
+    return findIndex(KeyIndexOf(Key));
+  }
+
+  const_iterator find(const KeyT &Key) const {
+    return const_cast<SparseSet*>(this)->findIndex(KeyIndexOf(Key));
+  }
+
+  /// count - Returns 1 if this set contains an element identified by Key,
+  /// 0 otherwise.
+  ///
+  size_type count(const KeyT &Key) const {
+    return find(Key) == end() ? 0 : 1;
+  }
+
+  /// insert - Attempts to insert a new element.
+  ///
+  /// If Val is successfully inserted, return (I, true), where I is an iterator
+  /// pointing to the newly inserted element.
+  ///
+  /// If the set already contains an element with the same key as Val, return
+  /// (I, false), where I is an iterator pointing to the existing element.
+  ///
+  /// Insertion invalidates all iterators.
+  ///
+  std::pair<iterator, bool> insert(const ValueT &Val) {
+    unsigned Idx = ValIndexOf(Val);
+    iterator I = findIndex(Idx);
+    if (I != end())
+      return std::make_pair(I, false);
+    Sparse[Idx] = size();
+    Dense.push_back(Val);
+    return std::make_pair(end() - 1, true);
+  }
+
+  /// array subscript - If an element already exists with this key, return it.
+  /// Otherwise, automatically construct a new value from Key, insert it,
+  /// and return the newly inserted element.
+  ValueT &operator[](const KeyT &Key) {
+    return *insert(ValueT(Key)).first;
+  }
+
+  ValueT pop_back_val() {
+    // Sparse does not need to be cleared, see find().
+    return Dense.pop_back_val();
+  }
+
+  /// erase - Erases an existing element identified by a valid iterator.
+  ///
+  /// This invalidates all iterators, but erase() returns an iterator pointing
+  /// to the next element.  This makes it possible to erase selected elements
+  /// while iterating over the set:
+  ///
+  ///   for (SparseSet::iterator I = Set.begin(); I != Set.end();)
+  ///     if (test(*I))
+  ///       I = Set.erase(I);
+  ///     else
+  ///       ++I;
+  ///
+  /// Note that end() changes when elements are erased, unlike std::list.
+  ///
+  iterator erase(iterator I) {
+    assert(unsigned(I - begin()) < size() && "Invalid iterator");
+    if (I != end() - 1) {
+      *I = Dense.back();
+      unsigned BackIdx = ValIndexOf(Dense.back());
+      assert(BackIdx < Universe && "Invalid key in set. Did object mutate?");
+      Sparse[BackIdx] = I - begin();
+    }
+    // This depends on SmallVector::pop_back() not invalidating iterators.
+    // std::vector::pop_back() doesn't give that guarantee.
+    Dense.pop_back();
+    return I;
+  }
+
+  /// erase - Erases an element identified by Key, if it exists.
+  ///
+  /// @param   Key The key identifying the element to erase.
+  /// @returns True when an element was erased, false if no element was found.
+  ///
+  bool erase(const KeyT &Key) {
+    iterator I = find(Key);
+    if (I == end())
+      return false;
+    erase(I);
+    return true;
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SPARSESET_H
diff --git a/linux-x64/clang/include/llvm/ADT/Statistic.h b/linux-x64/clang/include/llvm/ADT/Statistic.h
new file mode 100644
index 0000000..3a08997
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Statistic.h
@@ -0,0 +1,219 @@
+//===-- llvm/ADT/Statistic.h - Easy way to expose stats ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the 'Statistic' class, which is designed to be an easy way
+// to expose various metrics from passes.  These statistics are printed at the
+// end of a run (from llvm_shutdown), when the -stats command line option is
+// passed on the command line.
+//
+// This is useful for reporting information like the number of instructions
+// simplified, optimized or removed by various transformations, like this:
+//
+// static Statistic NumInstsKilled("gcse", "Number of instructions killed");
+//
+// Later, in the code: ++NumInstsKilled;
+//
+// NOTE: Statistics *must* be declared as global variables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STATISTIC_H
+#define LLVM_ADT_STATISTIC_H
+
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Compiler.h"
+#include <atomic>
+#include <memory>
+#include <vector>
+
+// Determine whether statistics should be enabled. We must do it here rather
+// than in CMake because multi-config generators cannot determine this at
+// configure time.
+#if !defined(NDEBUG) || LLVM_FORCE_ENABLE_STATS
+#define LLVM_ENABLE_STATS 1
+#endif
+
+namespace llvm {
+
+class raw_ostream;
+class raw_fd_ostream;
+class StringRef;
+
+class Statistic {
+public:
+  const char *DebugType;
+  const char *Name;
+  const char *Desc;
+  std::atomic<unsigned> Value;
+  std::atomic<bool> Initialized;
+
+  unsigned getValue() const { return Value.load(std::memory_order_relaxed); }
+  const char *getDebugType() const { return DebugType; }
+  const char *getName() const { return Name; }
+  const char *getDesc() const { return Desc; }
+
+  /// construct - This should only be called for non-global statistics.
+  void construct(const char *debugtype, const char *name, const char *desc) {
+    DebugType = debugtype;
+    Name = name;
+    Desc = desc;
+    Value = 0;
+    Initialized = false;
+  }
+
+  // Allow use of this class as the value itself.
+  operator unsigned() const { return getValue(); }
+
+#if LLVM_ENABLE_STATS
+  const Statistic &operator=(unsigned Val) {
+    Value.store(Val, std::memory_order_relaxed);
+    return init();
+  }
+
+  const Statistic &operator++() {
+    Value.fetch_add(1, std::memory_order_relaxed);
+    return init();
+  }
+
+  unsigned operator++(int) {
+    init();
+    return Value.fetch_add(1, std::memory_order_relaxed);
+  }
+
+  const Statistic &operator--() {
+    Value.fetch_sub(1, std::memory_order_relaxed);
+    return init();
+  }
+
+  unsigned operator--(int) {
+    init();
+    return Value.fetch_sub(1, std::memory_order_relaxed);
+  }
+
+  const Statistic &operator+=(unsigned V) {
+    if (V == 0)
+      return *this;
+    Value.fetch_add(V, std::memory_order_relaxed);
+    return init();
+  }
+
+  const Statistic &operator-=(unsigned V) {
+    if (V == 0)
+      return *this;
+    Value.fetch_sub(V, std::memory_order_relaxed);
+    return init();
+  }
+
+  void updateMax(unsigned V) {
+    unsigned PrevMax = Value.load(std::memory_order_relaxed);
+    // Keep trying to update max until we succeed or another thread produces
+    // a bigger max than us.
+    while (V > PrevMax && !Value.compare_exchange_weak(
+                              PrevMax, V, std::memory_order_relaxed)) {
+    }
+    init();
+  }
+
+#else  // Statistics are disabled in release builds.
+
+  const Statistic &operator=(unsigned Val) {
+    return *this;
+  }
+
+  const Statistic &operator++() {
+    return *this;
+  }
+
+  unsigned operator++(int) {
+    return 0;
+  }
+
+  const Statistic &operator--() {
+    return *this;
+  }
+
+  unsigned operator--(int) {
+    return 0;
+  }
+
+  const Statistic &operator+=(const unsigned &V) {
+    return *this;
+  }
+
+  const Statistic &operator-=(const unsigned &V) {
+    return *this;
+  }
+
+  void updateMax(unsigned V) {}
+
+#endif  // LLVM_ENABLE_STATS
+
+protected:
+  Statistic &init() {
+    if (!Initialized.load(std::memory_order_acquire))
+      RegisterStatistic();
+    return *this;
+  }
+
+  void RegisterStatistic();
+};
+
+// STATISTIC - A macro to make definition of statistics really simple.  This
+// automatically passes the DEBUG_TYPE of the file into the statistic.
+#define STATISTIC(VARNAME, DESC)                                               \
+  static llvm::Statistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC, {0}, {false}}
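+
+// For example (sketch; the DEBUG_TYPE string and the statistic name are
+// hypothetical):
+//
+//   #define DEBUG_TYPE "mypass"
+//   STATISTIC(NumErased, "Number of redundant instructions erased");
+//   ...
+//   ++NumErased;  // a no-op unless LLVM_ENABLE_STATS is enabled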
+
+/// \brief Enable the collection and printing of statistics.
+void EnableStatistics(bool PrintOnExit = true);
+
+/// \brief Check if statistics are enabled.
+bool AreStatisticsEnabled();
+
+/// \brief Return a file stream to print our output on.
+std::unique_ptr<raw_fd_ostream> CreateInfoOutputFile();
+
+/// \brief Print statistics to the file returned by CreateInfoOutputFile().
+void PrintStatistics();
+
+/// \brief Print statistics to the given output stream.
+void PrintStatistics(raw_ostream &OS);
+
+/// Print statistics in JSON format. This does include all global timers (\see
+/// Timer, TimerGroup). Note that the timers are cleared after printing and will
+/// not be printed in human readable form or in a second call of
+/// PrintStatisticsJSON().
+void PrintStatisticsJSON(raw_ostream &OS);
+
+/// \brief Get the statistics. This can be used to look up the value of
+/// statistics without needing to parse JSON.
+///
+/// This function does not prevent statistics being updated by other threads
+/// during its execution. It will return the value at the point that it is
+/// read. However, it will prevent new statistics from registering until it
+/// completes.
+const std::vector<std::pair<StringRef, unsigned>> GetStatistics();
+
+/// \brief Reset the statistics. This can be used to zero and de-register the
+/// statistics in order to measure a compilation.
+///
+/// When this function begins to call destructors prior to returning, all
+/// statistics will be zero and unregistered. However, that might not remain
+/// the case by the time this function finishes returning. Whether updates from
+/// other threads are lost or merely deferred until the function returns is
+/// timing sensitive.
+///
+/// Callers who intend to use this to measure statistics for a single
+/// compilation should ensure that no compilations are in progress at the point
+/// this function is called and that only one compilation executes until calling
+/// GetStatistics().
+void ResetStatistics();
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STATISTIC_H
diff --git a/linux-x64/clang/include/llvm/ADT/StringExtras.h b/linux-x64/clang/include/llvm/ADT/StringExtras.h
new file mode 100644
index 0000000..45f6677
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/StringExtras.h
@@ -0,0 +1,367 @@
+//===- llvm/ADT/StringExtras.h - Useful string functions --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some functions that are useful when dealing with strings.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGEXTRAS_H
+#define LLVM_ADT_STRINGEXTRAS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <iterator>
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+template<typename T> class SmallVectorImpl;
+class raw_ostream;
+
+/// hexdigit - Return the hexadecimal character for the
+/// given number \p X (which should be less than 16).
+inline char hexdigit(unsigned X, bool LowerCase = false) {
+  const char HexChar = LowerCase ? 'a' : 'A';
+  return X < 10 ? '0' + X : HexChar + X - 10;
+}
+
+/// Construct a string ref from a boolean.
+inline StringRef toStringRef(bool B) { return StringRef(B ? "true" : "false"); }
+
+/// Construct a string ref from an array ref of unsigned chars.
+inline StringRef toStringRef(ArrayRef<uint8_t> Input) {
+  return StringRef(reinterpret_cast<const char *>(Input.begin()), Input.size());
+}
+
+/// Construct an array ref of unsigned chars from a string ref.
+inline ArrayRef<uint8_t> arrayRefFromStringRef(StringRef Input) {
+  return {Input.bytes_begin(), Input.bytes_end()};
+}
+
+/// Interpret the given character \p C as a hexadecimal digit and return its
+/// value.
+///
+/// If \p C is not a valid hex digit, -1U is returned.
+inline unsigned hexDigitValue(char C) {
+  if (C >= '0' && C <= '9') return C-'0';
+  if (C >= 'a' && C <= 'f') return C-'a'+10U;
+  if (C >= 'A' && C <= 'F') return C-'A'+10U;
+  return -1U;
+}
+
+/// Checks if character \p C is one of the 10 decimal digits.
+inline bool isDigit(char C) { return C >= '0' && C <= '9'; }
+
+/// Checks if character \p C is a hexadecimal numeric character.
+inline bool isHexDigit(char C) { return hexDigitValue(C) != -1U; }
+
+/// Checks if character \p C is a valid letter as classified by "C" locale.
+inline bool isAlpha(char C) {
+  return ('a' <= C && C <= 'z') || ('A' <= C && C <= 'Z');
+}
+
+/// Checks whether character \p C is either a decimal digit or an uppercase or
+/// lowercase letter as classified by "C" locale.
+inline bool isAlnum(char C) { return isAlpha(C) || isDigit(C); }
+
+/// Returns the corresponding lowercase character if \p x is uppercase.
+inline char toLower(char x) {
+  if (x >= 'A' && x <= 'Z')
+    return x - 'A' + 'a';
+  return x;
+}
+
+/// Returns the corresponding uppercase character if \p x is lowercase.
+inline char toUpper(char x) {
+  if (x >= 'a' && x <= 'z')
+    return x - 'a' + 'A';
+  return x;
+}
+
+inline std::string utohexstr(uint64_t X, bool LowerCase = false) {
+  char Buffer[17];
+  char *BufPtr = std::end(Buffer);
+
+  if (X == 0) *--BufPtr = '0';
+
+  while (X) {
+    unsigned char Mod = static_cast<unsigned char>(X) & 15;
+    *--BufPtr = hexdigit(Mod, LowerCase);
+    X >>= 4;
+  }
+
+  return std::string(BufPtr, std::end(Buffer));
+}
+
+/// Convert buffer \p Input to its hexadecimal representation.
+/// The returned string is double the size of \p Input.
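+/// For example (sketch), toHex("A") returns "41".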
+inline std::string toHex(StringRef Input) {
+  static const char *const LUT = "0123456789ABCDEF";
+  size_t Length = Input.size();
+
+  std::string Output;
+  Output.reserve(2 * Length);
+  for (size_t i = 0; i < Length; ++i) {
+    const unsigned char c = Input[i];
+    Output.push_back(LUT[c >> 4]);
+    Output.push_back(LUT[c & 15]);
+  }
+  return Output;
+}
+
+inline std::string toHex(ArrayRef<uint8_t> Input) {
+  return toHex(toStringRef(Input));
+}
+
+inline uint8_t hexFromNibbles(char MSB, char LSB) {
+  unsigned U1 = hexDigitValue(MSB);
+  unsigned U2 = hexDigitValue(LSB);
+  assert(U1 != -1U && U2 != -1U);
+
+  return static_cast<uint8_t>((U1 << 4) | U2);
+}
+
+/// Convert hexadecimal string \p Input to its binary representation.
+/// The returned string is half the size of \p Input, rounded up when \p Input
+/// has an odd length (an implicit leading zero nibble is assumed).
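+/// For example (sketch), fromHex("41") returns "A".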
+inline std::string fromHex(StringRef Input) {
+  if (Input.empty())
+    return std::string();
+
+  std::string Output;
+  Output.reserve((Input.size() + 1) / 2);
+  if (Input.size() % 2 == 1) {
+    Output.push_back(hexFromNibbles('0', Input.front()));
+    Input = Input.drop_front();
+  }
+
+  assert(Input.size() % 2 == 0);
+  while (!Input.empty()) {
+    uint8_t Hex = hexFromNibbles(Input[0], Input[1]);
+    Output.push_back(Hex);
+    Input = Input.drop_front(2);
+  }
+  return Output;
+}
+
+/// \brief Convert the string \p S to an integer of the specified type using
+/// the radix \p Base.  If \p Base is 0, auto-detects the radix.
+/// Returns true if the number was successfully converted, false otherwise.
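+///
+/// For example (sketch), to_integer("0x1F", Num) returns true and sets Num to
+/// 31, since Base 0 auto-detects the "0x" prefix as hexadecimal.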
+template <typename N> bool to_integer(StringRef S, N &Num, unsigned Base = 0) {
+  return !S.getAsInteger(Base, Num);
+}
+
+namespace detail {
+template <typename N>
+inline bool to_float(const Twine &T, N &Num, N (*StrTo)(const char *, char **)) {
+  SmallString<32> Storage;
+  StringRef S = T.toNullTerminatedStringRef(Storage);
+  char *End;
+  N Temp = StrTo(S.data(), &End);
+  if (*End != '\0')
+    return false;
+  Num = Temp;
+  return true;
+}
+}
+
+inline bool to_float(const Twine &T, float &Num) {
+  return detail::to_float(T, Num, strtof);
+}
+
+inline bool to_float(const Twine &T, double &Num) {
+  return detail::to_float(T, Num, strtod);
+}
+
+inline bool to_float(const Twine &T, long double &Num) {
+  return detail::to_float(T, Num, strtold);
+}
+
+inline std::string utostr(uint64_t X, bool isNeg = false) {
+  char Buffer[21];
+  char *BufPtr = std::end(Buffer);
+
+  if (X == 0) *--BufPtr = '0';  // Handle special case...
+
+  while (X) {
+    *--BufPtr = '0' + char(X % 10);
+    X /= 10;
+  }
+
+  if (isNeg) *--BufPtr = '-';   // Add negative sign...
+  return std::string(BufPtr, std::end(Buffer));
+}
+
+inline std::string itostr(int64_t X) {
+  if (X < 0)
+    return utostr(static_cast<uint64_t>(-X), true);
+  else
+    return utostr(static_cast<uint64_t>(X));
+}
+
+/// StrInStrNoCase - Portable version of strcasestr.  Locates the first
+/// occurrence of string 's2' in string 's1', ignoring case.  Returns
+/// the offset of s2 in s1, or npos if s2 cannot be found.
+StringRef::size_type StrInStrNoCase(StringRef s1, StringRef s2);
+
+/// getToken - This function extracts one token from source, ignoring any
+/// leading characters that appear in the Delimiters string, and ending the
+/// token at any of the characters that appear in the Delimiters string.  If
+/// there are no tokens in the source string, an empty string is returned.
+/// The function returns a pair containing the extracted token and the
+/// remaining tail string.
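+/// For example (sketch), getToken("  one two") returns ("one", " two").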
+std::pair<StringRef, StringRef> getToken(StringRef Source,
+                                         StringRef Delimiters = " \t\n\v\f\r");
+
+/// SplitString - Split up the specified string according to the specified
+/// delimiters, appending the result fragments to the output list.
+void SplitString(StringRef Source,
+                 SmallVectorImpl<StringRef> &OutFragments,
+                 StringRef Delimiters = " \t\n\v\f\r");
+
+/// Returns the English suffix for an ordinal integer (-st, -nd, -rd, -th).
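+/// For example (sketch), getOrdinalSuffix(1) is "st", getOrdinalSuffix(11) is
+/// "th", and getOrdinalSuffix(22) is "nd".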
+inline StringRef getOrdinalSuffix(unsigned Val) {
+  // It is critically important that we do this perfectly for
+  // user-written sequences with over 100 elements.
+  switch (Val % 100) {
+  case 11:
+  case 12:
+  case 13:
+    return "th";
+  default:
+    switch (Val % 10) {
+      case 1: return "st";
+      case 2: return "nd";
+      case 3: return "rd";
+      default: return "th";
+    }
+  }
+}
+
+/// PrintEscapedString - Print each character of the specified string, escaping
+/// it if it is not printable or if it is an escape char.
+void PrintEscapedString(StringRef Name, raw_ostream &Out);
+
+/// printLowerCase - Print each character as lowercase if it is uppercase.
+void printLowerCase(StringRef String, raw_ostream &Out);
+
+namespace detail {
+
+template <typename IteratorT>
+inline std::string join_impl(IteratorT Begin, IteratorT End,
+                             StringRef Separator, std::input_iterator_tag) {
+  std::string S;
+  if (Begin == End)
+    return S;
+
+  S += (*Begin);
+  while (++Begin != End) {
+    S += Separator;
+    S += (*Begin);
+  }
+  return S;
+}
+
+template <typename IteratorT>
+inline std::string join_impl(IteratorT Begin, IteratorT End,
+                             StringRef Separator, std::forward_iterator_tag) {
+  std::string S;
+  if (Begin == End)
+    return S;
+
+  size_t Len = (std::distance(Begin, End) - 1) * Separator.size();
+  for (IteratorT I = Begin; I != End; ++I)
+    Len += (*I).size();
+  S.reserve(Len);
+  S += (*Begin);
+  while (++Begin != End) {
+    S += Separator;
+    S += (*Begin);
+  }
+  return S;
+}
+
+template <typename Sep>
+inline void join_items_impl(std::string &Result, Sep Separator) {}
+
+template <typename Sep, typename Arg>
+inline void join_items_impl(std::string &Result, Sep Separator,
+                            const Arg &Item) {
+  Result += Item;
+}
+
+template <typename Sep, typename Arg1, typename... Args>
+inline void join_items_impl(std::string &Result, Sep Separator, const Arg1 &A1,
+                            Args &&... Items) {
+  Result += A1;
+  Result += Separator;
+  join_items_impl(Result, Separator, std::forward<Args>(Items)...);
+}
+
+inline size_t join_one_item_size(char C) { return 1; }
+inline size_t join_one_item_size(const char *S) { return S ? ::strlen(S) : 0; }
+
+template <typename T> inline size_t join_one_item_size(const T &Str) {
+  return Str.size();
+}
+
+inline size_t join_items_size() { return 0; }
+
+template <typename A1> inline size_t join_items_size(const A1 &A) {
+  return join_one_item_size(A);
+}
+template <typename A1, typename... Args>
+inline size_t join_items_size(const A1 &A, Args &&... Items) {
+  return join_one_item_size(A) + join_items_size(std::forward<Args>(Items)...);
+}
+
+} // end namespace detail
+
+/// Joins the strings in the range [Begin, End), adding Separator between
+/// the elements.
+template <typename IteratorT>
+inline std::string join(IteratorT Begin, IteratorT End, StringRef Separator) {
+  using tag = typename std::iterator_traits<IteratorT>::iterator_category;
+  return detail::join_impl(Begin, End, Separator, tag());
+}
+
+/// Joins the strings in the range [R.begin(), R.end()), adding Separator
+/// between the elements.
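+///
+/// For example (sketch):
+///
+///   std::vector<std::string> Words = {"a", "b", "c"};
+///   join(Words, ", ");               // yields "a, b, c"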
+template <typename Range>
+inline std::string join(Range &&R, StringRef Separator) {
+  return join(R.begin(), R.end(), Separator);
+}
+
+/// Joins the strings in the parameter pack \p Items, adding \p Separator
+/// between the elements.  All arguments must be implicitly convertible to
+/// std::string, or there should be an overload of std::string::operator+=()
+/// that accepts the argument explicitly.
+template <typename Sep, typename... Args>
+inline std::string join_items(Sep Separator, Args &&... Items) {
+  std::string Result;
+  if (sizeof...(Items) == 0)
+    return Result;
+
+  size_t NS = detail::join_one_item_size(Separator);
+  size_t NI = detail::join_items_size(std::forward<Args>(Items)...);
+  Result.reserve(NI + (sizeof...(Items) - 1) * NS + 1);
+  detail::join_items_impl(Result, Separator, std::forward<Args>(Items)...);
+  return Result;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGEXTRAS_H
diff --git a/linux-x64/clang/include/llvm/ADT/StringMap.h b/linux-x64/clang/include/llvm/ADT/StringMap.h
new file mode 100644
index 0000000..d34d5ed
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/StringMap.h
@@ -0,0 +1,558 @@
+//===- StringMap.h - String Hash table map interface ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the StringMap class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGMAP_H
+#define LLVM_ADT_STRINGMAP_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+template<typename ValueTy> class StringMapConstIterator;
+template<typename ValueTy> class StringMapIterator;
+template<typename ValueTy> class StringMapKeyIterator;
+
+/// StringMapEntryBase - Shared base class of StringMapEntry instances.
+class StringMapEntryBase {
+  size_t StrLen;
+
+public:
+  explicit StringMapEntryBase(size_t Len) : StrLen(Len) {}
+
+  size_t getKeyLength() const { return StrLen; }
+};
+
+/// StringMapImpl - This is the base class of StringMap that is shared among
+/// all of its instantiations.
+class StringMapImpl {
+protected:
+  // Array of NumBuckets pointers to entries, null pointers are holes.
+  // TheTable[NumBuckets] contains a sentinel value for easy iteration. It is
+  // followed by an array of the actual hash values, stored as unsigned
+  // integers.
+  StringMapEntryBase **TheTable = nullptr;
+  unsigned NumBuckets = 0;
+  unsigned NumItems = 0;
+  unsigned NumTombstones = 0;
+  unsigned ItemSize;
+
+protected:
+  explicit StringMapImpl(unsigned itemSize)
+      : ItemSize(itemSize) {}
+  StringMapImpl(StringMapImpl &&RHS)
+      : TheTable(RHS.TheTable), NumBuckets(RHS.NumBuckets),
+        NumItems(RHS.NumItems), NumTombstones(RHS.NumTombstones),
+        ItemSize(RHS.ItemSize) {
+    RHS.TheTable = nullptr;
+    RHS.NumBuckets = 0;
+    RHS.NumItems = 0;
+    RHS.NumTombstones = 0;
+  }
+
+  StringMapImpl(unsigned InitSize, unsigned ItemSize);
+  unsigned RehashTable(unsigned BucketNo = 0);
+
+  /// LookupBucketFor - Look up the bucket that the specified string should end
+  /// up in.  If it already exists as a key in the map, the Item pointer for the
+  /// specified bucket will be non-null.  Otherwise, it will be null.  In either
+  /// case, the FullHashValue field of the bucket will be set to the hash value
+  /// of the string.
+  unsigned LookupBucketFor(StringRef Key);
+
+  /// FindKey - Look up the bucket that contains the specified key. If it exists
+  /// in the map, return the bucket number of the key.  Otherwise return -1.
+  /// This does not modify the map.
+  int FindKey(StringRef Key) const;
+
+  /// RemoveKey - Remove the specified StringMapEntry from the table, but do not
+  /// delete it.  This aborts if the value isn't in the table.
+  void RemoveKey(StringMapEntryBase *V);
+
+  /// RemoveKey - Remove the StringMapEntry for the specified key from the
+  /// table, returning it.  If the key is not in the table, this returns null.
+  StringMapEntryBase *RemoveKey(StringRef Key);
+
+  /// Allocate the table with the specified number of buckets and otherwise
+  /// setup the map as empty.
+  void init(unsigned Size);
+
+public:
+  static StringMapEntryBase *getTombstoneVal() {
+    uintptr_t Val = static_cast<uintptr_t>(-1);
+    Val <<= PointerLikeTypeTraits<StringMapEntryBase *>::NumLowBitsAvailable;
+    return reinterpret_cast<StringMapEntryBase *>(Val);
+  }
+
+  unsigned getNumBuckets() const { return NumBuckets; }
+  unsigned getNumItems() const { return NumItems; }
+
+  bool empty() const { return NumItems == 0; }
+  unsigned size() const { return NumItems; }
+
+  void swap(StringMapImpl &Other) {
+    std::swap(TheTable, Other.TheTable);
+    std::swap(NumBuckets, Other.NumBuckets);
+    std::swap(NumItems, Other.NumItems);
+    std::swap(NumTombstones, Other.NumTombstones);
+  }
+};
+
+/// StringMapEntry - This is used to represent one value that is inserted into
+/// a StringMap.  It contains the Value itself and the key: the string length
+/// and data.
+template<typename ValueTy>
+class StringMapEntry : public StringMapEntryBase {
+public:
+  ValueTy second;
+
+  explicit StringMapEntry(size_t strLen)
+    : StringMapEntryBase(strLen), second() {}
+  template <typename... InitTy>
+  StringMapEntry(size_t strLen, InitTy &&... InitVals)
+      : StringMapEntryBase(strLen), second(std::forward<InitTy>(InitVals)...) {}
+  StringMapEntry(StringMapEntry &E) = delete;
+
+  StringRef getKey() const {
+    return StringRef(getKeyData(), getKeyLength());
+  }
+
+  const ValueTy &getValue() const { return second; }
+  ValueTy &getValue() { return second; }
+
+  void setValue(const ValueTy &V) { second = V; }
+
+  /// getKeyData - Return the start of the string data that is the key for this
+  /// value.  The string data is always stored immediately after the
+  /// StringMapEntry object.
+  const char *getKeyData() const {return reinterpret_cast<const char*>(this+1);}
+
+  StringRef first() const { return StringRef(getKeyData(), getKeyLength()); }
+
+  /// Create a StringMapEntry for the specified key and construct the value
+  /// using \p InitVals.
+  template <typename AllocatorTy, typename... InitTy>
+  static StringMapEntry *Create(StringRef Key, AllocatorTy &Allocator,
+                                InitTy &&... InitVals) {
+    size_t KeyLength = Key.size();
+
+    // Allocate a new item with space for the string at the end and a null
+    // terminator.
+    size_t AllocSize = sizeof(StringMapEntry) + KeyLength + 1;
+    size_t Alignment = alignof(StringMapEntry);
+
+    StringMapEntry *NewItem =
+      static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment));
+
+    if (NewItem == nullptr)
+      report_bad_alloc_error("Allocation of StringMap entry failed.");
+
+    // Construct the value.
+    new (NewItem) StringMapEntry(KeyLength, std::forward<InitTy>(InitVals)...);
+
+    // Copy the string information.
+    char *StrBuffer = const_cast<char*>(NewItem->getKeyData());
+    if (KeyLength > 0)
+      memcpy(StrBuffer, Key.data(), KeyLength);
+    StrBuffer[KeyLength] = 0;  // Null terminate for convenience of clients.
+    return NewItem;
+  }
+
+  /// Create - Create a StringMapEntry with normal malloc/free.
+  template <typename... InitType>
+  static StringMapEntry *Create(StringRef Key, InitType &&... InitVal) {
+    MallocAllocator A;
+    return Create(Key, A, std::forward<InitType>(InitVal)...);
+  }
+
+  static StringMapEntry *Create(StringRef Key) {
+    return Create(Key, ValueTy());
+  }
+
+  /// GetStringMapEntryFromKeyData - Given key data that is known to be embedded
+  /// into a StringMapEntry, return the StringMapEntry itself.
+  static StringMapEntry &GetStringMapEntryFromKeyData(const char *KeyData) {
+    char *Ptr = const_cast<char*>(KeyData) - sizeof(StringMapEntry<ValueTy>);
+    return *reinterpret_cast<StringMapEntry*>(Ptr);
+  }
+
+  /// Destroy - Destroy this StringMapEntry, releasing memory back to the
+  /// specified allocator.
+  template<typename AllocatorTy>
+  void Destroy(AllocatorTy &Allocator) {
+    // Free memory referenced by the item.
+    size_t AllocSize = sizeof(StringMapEntry) + getKeyLength() + 1;
+    this->~StringMapEntry();
+    Allocator.Deallocate(static_cast<void *>(this), AllocSize);
+  }
+
+  /// Destroy this object, releasing memory back to the malloc allocator.
+  void Destroy() {
+    MallocAllocator A;
+    Destroy(A);
+  }
+};
+
+/// StringMap - This is an unconventional map that is specialized for handling
+/// keys that are "strings", which are basically ranges of bytes. This does some
+/// funky memory allocation and hashing things to make it extremely efficient,
+/// storing the string data *after* the value in the map.
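+///
+/// A minimal usage sketch (illustrative; the keys and values are arbitrary):
+///
+///   StringMap<int> Counts;
+///   ++Counts["apple"];              // default-constructs 0, then increments
+///   Counts.insert({"pear", 3});
+///   int N = Counts.lookup("apple"); // 1; a missing key yields int()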
+template<typename ValueTy, typename AllocatorTy = MallocAllocator>
+class StringMap : public StringMapImpl {
+  AllocatorTy Allocator;
+
+public:
+  using MapEntryTy = StringMapEntry<ValueTy>;
+
+  StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {}
+
+  explicit StringMap(unsigned InitialSize)
+    : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}
+
+  explicit StringMap(AllocatorTy A)
+    : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {}
+
+  StringMap(unsigned InitialSize, AllocatorTy A)
+    : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))),
+      Allocator(A) {}
+
+  StringMap(std::initializer_list<std::pair<StringRef, ValueTy>> List)
+      : StringMapImpl(List.size(), static_cast<unsigned>(sizeof(MapEntryTy))) {
+    for (const auto &P : List) {
+      insert(P);
+    }
+  }
+
+  StringMap(StringMap &&RHS)
+      : StringMapImpl(std::move(RHS)), Allocator(std::move(RHS.Allocator)) {}
+
+  StringMap(const StringMap &RHS) :
+    StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))),
+    Allocator(RHS.Allocator) {
+    if (RHS.empty())
+      return;
+
+    // Allocate TheTable of the same size as RHS's TheTable, and set the
+    // sentinel appropriately (and NumBuckets).
+    init(RHS.NumBuckets);
+    unsigned *HashTable = (unsigned *)(TheTable + NumBuckets + 1),
+             *RHSHashTable = (unsigned *)(RHS.TheTable + NumBuckets + 1);
+
+    NumItems = RHS.NumItems;
+    NumTombstones = RHS.NumTombstones;
+    for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
+      StringMapEntryBase *Bucket = RHS.TheTable[I];
+      if (!Bucket || Bucket == getTombstoneVal()) {
+        TheTable[I] = Bucket;
+        continue;
+      }
+
+      TheTable[I] = MapEntryTy::Create(
+          static_cast<MapEntryTy *>(Bucket)->getKey(), Allocator,
+          static_cast<MapEntryTy *>(Bucket)->getValue());
+      HashTable[I] = RHSHashTable[I];
+    }
+
+    // Note that here we've copied everything from the RHS into this object,
+    // tombstones included. We could, instead, have re-probed for each key to
+    // instantiate this new object without any tombstone buckets. The
+    // assumption here is that items are rarely deleted from most StringMaps,
+    // and so tombstones are rare, so the cost of re-probing for all inputs is
+    // not worthwhile.
+  }
+
+  StringMap &operator=(StringMap RHS) {
+    StringMapImpl::swap(RHS);
+    std::swap(Allocator, RHS.Allocator);
+    return *this;
+  }
+
+  ~StringMap() {
+    // Delete all the elements in the map, but don't reset the elements
+    // to default values.  This is a copy of clear(), but avoids unnecessary
+    // work not required in the destructor.
+    if (!empty()) {
+      for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
+        StringMapEntryBase *Bucket = TheTable[I];
+        if (Bucket && Bucket != getTombstoneVal()) {
+          static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
+        }
+      }
+    }
+    free(TheTable);
+  }
+
+  AllocatorTy &getAllocator() { return Allocator; }
+  const AllocatorTy &getAllocator() const { return Allocator; }
+
+  using key_type = const char*;
+  using mapped_type = ValueTy;
+  using value_type = StringMapEntry<ValueTy>;
+  using size_type = size_t;
+
+  using const_iterator = StringMapConstIterator<ValueTy>;
+  using iterator = StringMapIterator<ValueTy>;
+
+  iterator begin() {
+    return iterator(TheTable, NumBuckets == 0);
+  }
+  iterator end() {
+    return iterator(TheTable+NumBuckets, true);
+  }
+  const_iterator begin() const {
+    return const_iterator(TheTable, NumBuckets == 0);
+  }
+  const_iterator end() const {
+    return const_iterator(TheTable+NumBuckets, true);
+  }
+
+  iterator_range<StringMapKeyIterator<ValueTy>> keys() const {
+    return make_range(StringMapKeyIterator<ValueTy>(begin()),
+                      StringMapKeyIterator<ValueTy>(end()));
+  }
+
+  iterator find(StringRef Key) {
+    int Bucket = FindKey(Key);
+    if (Bucket == -1) return end();
+    return iterator(TheTable+Bucket, true);
+  }
+
+  const_iterator find(StringRef Key) const {
+    int Bucket = FindKey(Key);
+    if (Bucket == -1) return end();
+    return const_iterator(TheTable+Bucket, true);
+  }
+
+  /// lookup - Return the entry for the specified key, or a default
+  /// constructed value if no such entry exists.
+  ValueTy lookup(StringRef Key) const {
+    const_iterator it = find(Key);
+    if (it != end())
+      return it->second;
+    return ValueTy();
+  }
+
+  /// Lookup the ValueTy for the \p Key, or create a default constructed value
+  /// if the key is not in the map.
+  ValueTy &operator[](StringRef Key) { return try_emplace(Key).first->second; }
+
+  /// count - Return 1 if the element is in the map, 0 otherwise.
+  size_type count(StringRef Key) const {
+    return find(Key) == end() ? 0 : 1;
+  }
+
+  /// insert - Insert the specified key/value pair into the map.  If the key
+  /// already exists in the map, return false and ignore the request, otherwise
+  /// insert it and return true.
+  bool insert(MapEntryTy *KeyValue) {
+    unsigned BucketNo = LookupBucketFor(KeyValue->getKey());
+    StringMapEntryBase *&Bucket = TheTable[BucketNo];
+    if (Bucket && Bucket != getTombstoneVal())
+      return false;  // Already exists in map.
+
+    if (Bucket == getTombstoneVal())
+      --NumTombstones;
+    Bucket = KeyValue;
+    ++NumItems;
+    assert(NumItems + NumTombstones <= NumBuckets);
+
+    RehashTable();
+    return true;
+  }
+
+  /// insert - Inserts the specified key/value pair into the map if the key
+  /// isn't already in the map. The bool component of the returned pair is true
+  /// if and only if the insertion takes place, and the iterator component of
+  /// the pair points to the element with key equivalent to the key of the pair.
+  std::pair<iterator, bool> insert(std::pair<StringRef, ValueTy> KV) {
+    return try_emplace(KV.first, std::move(KV.second));
+  }
+
+  /// Emplace a new element for the specified key into the map if the key isn't
+  /// already in the map. The bool component of the returned pair is true
+  /// if and only if the insertion takes place, and the iterator component of
+  /// the pair points to the element with key equivalent to the key of the pair.
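+  ///
+  /// A brief illustrative sketch (names here are hypothetical):
+  /// \code
+  ///   StringMap<std::string> M;
+  ///   auto P = M.try_emplace("key", "value"); // P.second == true, inserted
+  ///   auto Q = M.try_emplace("key", "other"); // Q.second == false, kept "value"
+  /// \endcode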
+  template <typename... ArgsTy>
+  std::pair<iterator, bool> try_emplace(StringRef Key, ArgsTy &&... Args) {
+    unsigned BucketNo = LookupBucketFor(Key);
+    StringMapEntryBase *&Bucket = TheTable[BucketNo];
+    if (Bucket && Bucket != getTombstoneVal())
+      return std::make_pair(iterator(TheTable + BucketNo, false),
+                            false); // Already exists in map.
+
+    if (Bucket == getTombstoneVal())
+      --NumTombstones;
+    Bucket = MapEntryTy::Create(Key, Allocator, std::forward<ArgsTy>(Args)...);
+    ++NumItems;
+    assert(NumItems + NumTombstones <= NumBuckets);
+
+    BucketNo = RehashTable(BucketNo);
+    return std::make_pair(iterator(TheTable + BucketNo, false), true);
+  }
+
+  /// clear - Empties out the StringMap.
+  void clear() {
+    if (empty()) return;
+
+    // Zap all values, resetting the keys back to non-present (not tombstone),
+    // which is safe because we're removing all elements.
+    for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
+      StringMapEntryBase *&Bucket = TheTable[I];
+      if (Bucket && Bucket != getTombstoneVal()) {
+        static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
+      }
+      Bucket = nullptr;
+    }
+
+    NumItems = 0;
+    NumTombstones = 0;
+  }
+
+  /// remove - Remove the specified key/value pair from the map, but do not
+  /// erase it.  This aborts if the key is not in the map.
+  void remove(MapEntryTy *KeyValue) {
+    RemoveKey(KeyValue);
+  }
+
+  void erase(iterator I) {
+    MapEntryTy &V = *I;
+    remove(&V);
+    V.Destroy(Allocator);
+  }
+
+  bool erase(StringRef Key) {
+    iterator I = find(Key);
+    if (I == end()) return false;
+    erase(I);
+    return true;
+  }
+};
+
+template <typename DerivedTy, typename ValueTy>
+class StringMapIterBase
+    : public iterator_facade_base<DerivedTy, std::forward_iterator_tag,
+                                  ValueTy> {
+protected:
+  StringMapEntryBase **Ptr = nullptr;
+
+public:
+  StringMapIterBase() = default;
+
+  explicit StringMapIterBase(StringMapEntryBase **Bucket,
+                             bool NoAdvance = false)
+      : Ptr(Bucket) {
+    if (!NoAdvance) AdvancePastEmptyBuckets();
+  }
+
+  DerivedTy &operator=(const DerivedTy &Other) {
+    Ptr = Other.Ptr;
+    return static_cast<DerivedTy &>(*this);
+  }
+
+  bool operator==(const DerivedTy &RHS) const { return Ptr == RHS.Ptr; }
+
+  DerivedTy &operator++() { // Preincrement
+    ++Ptr;
+    AdvancePastEmptyBuckets();
+    return static_cast<DerivedTy &>(*this);
+  }
+
+  DerivedTy operator++(int) { // Post-increment
+    DerivedTy Tmp(Ptr);
+    ++*this;
+    return Tmp;
+  }
+
+private:
+  void AdvancePastEmptyBuckets() {
+    while (*Ptr == nullptr || *Ptr == StringMapImpl::getTombstoneVal())
+      ++Ptr;
+  }
+};
+
+template <typename ValueTy>
+class StringMapConstIterator
+    : public StringMapIterBase<StringMapConstIterator<ValueTy>,
+                               const StringMapEntry<ValueTy>> {
+  using base = StringMapIterBase<StringMapConstIterator<ValueTy>,
+                                 const StringMapEntry<ValueTy>>;
+
+public:
+  StringMapConstIterator() = default;
+  explicit StringMapConstIterator(StringMapEntryBase **Bucket,
+                                  bool NoAdvance = false)
+      : base(Bucket, NoAdvance) {}
+
+  const StringMapEntry<ValueTy> &operator*() const {
+    return *static_cast<const StringMapEntry<ValueTy> *>(*this->Ptr);
+  }
+};
+
+template <typename ValueTy>
+class StringMapIterator : public StringMapIterBase<StringMapIterator<ValueTy>,
+                                                   StringMapEntry<ValueTy>> {
+  using base =
+      StringMapIterBase<StringMapIterator<ValueTy>, StringMapEntry<ValueTy>>;
+
+public:
+  StringMapIterator() = default;
+  explicit StringMapIterator(StringMapEntryBase **Bucket,
+                             bool NoAdvance = false)
+      : base(Bucket, NoAdvance) {}
+
+  StringMapEntry<ValueTy> &operator*() const {
+    return *static_cast<StringMapEntry<ValueTy> *>(*this->Ptr);
+  }
+
+  operator StringMapConstIterator<ValueTy>() const {
+    return StringMapConstIterator<ValueTy>(this->Ptr, true);
+  }
+};
+
+template <typename ValueTy>
+class StringMapKeyIterator
+    : public iterator_adaptor_base<StringMapKeyIterator<ValueTy>,
+                                   StringMapConstIterator<ValueTy>,
+                                   std::forward_iterator_tag, StringRef> {
+  using base = iterator_adaptor_base<StringMapKeyIterator<ValueTy>,
+                                     StringMapConstIterator<ValueTy>,
+                                     std::forward_iterator_tag, StringRef>;
+
+public:
+  StringMapKeyIterator() = default;
+  explicit StringMapKeyIterator(StringMapConstIterator<ValueTy> Iter)
+      : base(std::move(Iter)) {}
+
+  StringRef &operator*() {
+    Key = this->wrapped()->getKey();
+    return Key;
+  }
+
+private:
+  StringRef Key;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGMAP_H
diff --git a/linux-x64/clang/include/llvm/ADT/StringRef.h b/linux-x64/clang/include/llvm/ADT/StringRef.h
new file mode 100644
index 0000000..3d2417a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/StringRef.h
@@ -0,0 +1,925 @@
+//===- StringRef.h - Constant String Reference Wrapper ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGREF_H
+#define LLVM_ADT_STRINGREF_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <limits>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+  class APInt;
+  class hash_code;
+  template <typename T> class SmallVectorImpl;
+  class StringRef;
+
+  /// Helper functions for StringRef::getAsInteger.
+  bool getAsUnsignedInteger(StringRef Str, unsigned Radix,
+                            unsigned long long &Result);
+
+  bool getAsSignedInteger(StringRef Str, unsigned Radix, long long &Result);
+
+  bool consumeUnsignedInteger(StringRef &Str, unsigned Radix,
+                              unsigned long long &Result);
+  bool consumeSignedInteger(StringRef &Str, unsigned Radix, long long &Result);
+
+  /// StringRef - Represent a constant reference to a string, i.e. a character
+  /// array and a length, which need not be null terminated.
+  ///
+  /// This class does not own the string data, it is expected to be used in
+  /// situations where the character data resides in some other buffer, whose
+  /// lifetime extends past that of the StringRef. For this reason, it is not in
+  /// general safe to store a StringRef.
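+  ///
+  /// A brief illustrative sketch (names here are hypothetical):
+  /// \code
+  ///   std::string Buffer = "hello world";
+  ///   StringRef S(Buffer);             // refers to Buffer's data; no copy
+  ///   StringRef Word = S.substr(0, 5); // "hello"
+  /// \endcode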
+  class StringRef {
+  public:
+    static const size_t npos = ~size_t(0);
+
+    using iterator = const char *;
+    using const_iterator = const char *;
+    using size_type = size_t;
+
+  private:
+    /// The start of the string, in an external buffer.
+    const char *Data = nullptr;
+
+    /// The length of the string.
+    size_t Length = 0;
+
+    // Work around a memcmp issue with null pointers (undefined behavior)
+    // by providing a specialized version.
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    static int compareMemory(const char *Lhs, const char *Rhs, size_t Length) {
+      if (Length == 0) { return 0; }
+      return ::memcmp(Lhs,Rhs,Length);
+    }
+
+  public:
+    /// @name Constructors
+    /// @{
+
+    /// Construct an empty string ref.
+    /*implicit*/ StringRef() = default;
+
+    /// Disable conversion from nullptr.  This prevents things like
+    /// if (S == nullptr)
+    StringRef(std::nullptr_t) = delete;
+
+    /// Construct a string ref from a cstring.
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    /*implicit*/ StringRef(const char *Str)
+        : Data(Str), Length(Str ? ::strlen(Str) : 0) {}
+
+    /// Construct a string ref from a pointer and length.
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    /*implicit*/ constexpr StringRef(const char *data, size_t length)
+        : Data(data), Length(length) {}
+
+    /// Construct a string ref from an std::string.
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    /*implicit*/ StringRef(const std::string &Str)
+      : Data(Str.data()), Length(Str.length()) {}
+
+    static StringRef withNullAsEmpty(const char *data) {
+      return StringRef(data ? data : "");
+    }
+
+    /// @}
+    /// @name Iterators
+    /// @{
+
+    iterator begin() const { return Data; }
+
+    iterator end() const { return Data + Length; }
+
+    const unsigned char *bytes_begin() const {
+      return reinterpret_cast<const unsigned char *>(begin());
+    }
+    const unsigned char *bytes_end() const {
+      return reinterpret_cast<const unsigned char *>(end());
+    }
+    iterator_range<const unsigned char *> bytes() const {
+      return make_range(bytes_begin(), bytes_end());
+    }
+
+    /// @}
+    /// @name String Operations
+    /// @{
+
+    /// data - Get a pointer to the start of the string (which may not be null
+    /// terminated).
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    const char *data() const { return Data; }
+
+    /// empty - Check if the string is empty.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool empty() const { return Length == 0; }
+
+    /// size - Get the string size.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    size_t size() const { return Length; }
+
+    /// front - Get the first character in the string.
+    LLVM_NODISCARD
+    char front() const {
+      assert(!empty());
+      return Data[0];
+    }
+
+    /// back - Get the last character in the string.
+    LLVM_NODISCARD
+    char back() const {
+      assert(!empty());
+      return Data[Length-1];
+    }
+
+    // copy - Allocate copy in Allocator and return StringRef to it.
+    template <typename Allocator>
+    LLVM_NODISCARD StringRef copy(Allocator &A) const {
+      // Don't request a length 0 copy from the allocator.
+      if (empty())
+        return StringRef();
+      char *S = A.template Allocate<char>(Length);
+      std::copy(begin(), end(), S);
+      return StringRef(S, Length);
+    }
+
+    /// equals - Check for string equality; this is more efficient than
+    /// compare() when the relative ordering of unequal strings isn't needed.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool equals(StringRef RHS) const {
+      return (Length == RHS.Length &&
+              compareMemory(Data, RHS.Data, RHS.Length) == 0);
+    }
+
+    /// equals_lower - Check for string equality, ignoring case.
+    LLVM_NODISCARD
+    bool equals_lower(StringRef RHS) const {
+      return Length == RHS.Length && compare_lower(RHS) == 0;
+    }
+
+    /// compare - Compare two strings; the result is -1, 0, or 1 if this string
+    /// is lexicographically less than, equal to, or greater than the \p RHS.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    int compare(StringRef RHS) const {
+      // Check the prefix for a mismatch.
+      if (int Res = compareMemory(Data, RHS.Data, std::min(Length, RHS.Length)))
+        return Res < 0 ? -1 : 1;
+
+      // Otherwise the prefixes match, so we only need to check the lengths.
+      if (Length == RHS.Length)
+        return 0;
+      return Length < RHS.Length ? -1 : 1;
+    }
+
+    /// compare_lower - Compare two strings, ignoring case.
+    LLVM_NODISCARD
+    int compare_lower(StringRef RHS) const;
+
+    /// compare_numeric - Compare two strings, treating sequences of digits as
+    /// numbers.
+    LLVM_NODISCARD
+    int compare_numeric(StringRef RHS) const;
+
+    /// \brief Determine the edit distance between this string and another
+    /// string.
+    ///
+    /// \param Other the string to compare this string against.
+    ///
+    /// \param AllowReplacements whether to allow character
+    /// replacements (change one character into another) as a single
+    /// operation, rather than as two operations (an insertion and a
+    /// removal).
+    ///
+    /// \param MaxEditDistance If non-zero, the maximum edit distance that
+    /// this routine is allowed to compute. If the edit distance will exceed
+    /// that maximum, returns \c MaxEditDistance+1.
+    ///
+    /// \returns the minimum number of character insertions, removals,
+    /// or (if \p AllowReplacements is \c true) replacements needed to
+    /// transform one of the given strings into the other. If zero,
+    /// the strings are identical.
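+    ///
+    /// For example, the classic Levenshtein pair (illustrative):
+    /// \code
+    ///   unsigned D = StringRef("kitten").edit_distance("sitting"); // 3
+    /// \endcode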
+    LLVM_NODISCARD
+    unsigned edit_distance(StringRef Other, bool AllowReplacements = true,
+                           unsigned MaxEditDistance = 0) const;
+
+    /// str - Get the contents as an std::string.
+    LLVM_NODISCARD
+    std::string str() const {
+      if (!Data) return std::string();
+      return std::string(Data, Length);
+    }
+
+    /// @}
+    /// @name Operator Overloads
+    /// @{
+
+    LLVM_NODISCARD
+    char operator[](size_t Index) const {
+      assert(Index < Length && "Invalid index!");
+      return Data[Index];
+    }
+
+    /// Disallow accidental assignment from a temporary std::string.
+    ///
+    /// The declaration here is extra complicated so that `stringRef = {}`
+    /// and `stringRef = "abc"` continue to select the move assignment operator.
+    template <typename T>
+    typename std::enable_if<std::is_same<T, std::string>::value,
+                            StringRef>::type &
+    operator=(T &&Str) = delete;
+
+    /// @}
+    /// @name Type Conversions
+    /// @{
+
+    operator std::string() const {
+      return str();
+    }
+
+    /// @}
+    /// @name String Predicates
+    /// @{
+
+    /// Check if this string starts with the given \p Prefix.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool startswith(StringRef Prefix) const {
+      return Length >= Prefix.Length &&
+             compareMemory(Data, Prefix.Data, Prefix.Length) == 0;
+    }
+
+    /// Check if this string starts with the given \p Prefix, ignoring case.
+    LLVM_NODISCARD
+    bool startswith_lower(StringRef Prefix) const;
+
+    /// Check if this string ends with the given \p Suffix.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool endswith(StringRef Suffix) const {
+      return Length >= Suffix.Length &&
+        compareMemory(end() - Suffix.Length, Suffix.Data, Suffix.Length) == 0;
+    }
+
+    /// Check if this string ends with the given \p Suffix, ignoring case.
+    LLVM_NODISCARD
+    bool endswith_lower(StringRef Suffix) const;
+
+    /// @}
+    /// @name String Searching
+    /// @{
+
+    /// Search for the first character \p C in the string.
+    ///
+    /// \returns The index of the first occurrence of \p C, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    size_t find(char C, size_t From = 0) const {
+      size_t FindBegin = std::min(From, Length);
+      if (FindBegin < Length) { // Avoid calling memchr with nullptr.
+        // Just forward to memchr, which is faster than a hand-rolled loop.
+        if (const void *P = ::memchr(Data + FindBegin, C, Length - FindBegin))
+          return static_cast<const char *>(P) - Data;
+      }
+      return npos;
+    }
+
+    /// Search for the first character \p C in the string, ignoring case.
+    ///
+    /// \returns The index of the first occurrence of \p C, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t find_lower(char C, size_t From = 0) const;
+
+    /// Search for the first character satisfying the predicate \p F
+    ///
+    /// \returns The index of the first character satisfying \p F starting from
+    /// \p From, or npos if not found.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    size_t find_if(function_ref<bool(char)> F, size_t From = 0) const {
+      StringRef S = drop_front(From);
+      while (!S.empty()) {
+        if (F(S.front()))
+          return size() - S.size();
+        S = S.drop_front();
+      }
+      return npos;
+    }
+
+    /// Search for the first character not satisfying the predicate \p F
+    ///
+    /// \returns The index of the first character not satisfying \p F starting
+    /// from \p From, or npos if not found.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    size_t find_if_not(function_ref<bool(char)> F, size_t From = 0) const {
+      return find_if([F](char c) { return !F(c); }, From);
+    }
+
+    /// Search for the first string \p Str in the string.
+    ///
+    /// \returns The index of the first occurrence of \p Str, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t find(StringRef Str, size_t From = 0) const;
+
+    /// Search for the first string \p Str in the string, ignoring case.
+    ///
+    /// \returns The index of the first occurrence of \p Str, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t find_lower(StringRef Str, size_t From = 0) const;
+
+    /// Search for the last character \p C in the string.
+    ///
+    /// \returns The index of the last occurrence of \p C, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t rfind(char C, size_t From = npos) const {
+      From = std::min(From, Length);
+      size_t i = From;
+      while (i != 0) {
+        --i;
+        if (Data[i] == C)
+          return i;
+      }
+      return npos;
+    }
+
+    /// Search for the last character \p C in the string, ignoring case.
+    ///
+    /// \returns The index of the last occurrence of \p C, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t rfind_lower(char C, size_t From = npos) const;
+
+    /// Search for the last string \p Str in the string.
+    ///
+    /// \returns The index of the last occurrence of \p Str, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t rfind(StringRef Str) const;
+
+    /// Search for the last string \p Str in the string, ignoring case.
+    ///
+    /// \returns The index of the last occurrence of \p Str, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t rfind_lower(StringRef Str) const;
+
+    /// Find the first character in the string that is \p C, or npos if not
+    /// found. Same as find.
+    LLVM_NODISCARD
+    size_t find_first_of(char C, size_t From = 0) const {
+      return find(C, From);
+    }
+
+    /// Find the first character in the string that is in \p Chars, or npos if
+    /// not found.
+    ///
+    /// Complexity: O(size() + Chars.size())
+    LLVM_NODISCARD
+    size_t find_first_of(StringRef Chars, size_t From = 0) const;
+
+    /// Find the first character in the string that is not \p C, or npos if
+    /// not found.
+    LLVM_NODISCARD
+    size_t find_first_not_of(char C, size_t From = 0) const;
+
+    /// Find the first character in the string that is not in the string
+    /// \p Chars, or npos if not found.
+    ///
+    /// Complexity: O(size() + Chars.size())
+    LLVM_NODISCARD
+    size_t find_first_not_of(StringRef Chars, size_t From = 0) const;
+
+    /// Find the last character in the string that is \p C, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t find_last_of(char C, size_t From = npos) const {
+      return rfind(C, From);
+    }
+
+    /// Find the last character in the string that is in \p Chars, or npos if
+    /// not found.
+    ///
+    /// Complexity: O(size() + Chars.size())
+    LLVM_NODISCARD
+    size_t find_last_of(StringRef Chars, size_t From = npos) const;
+
+    /// Find the last character in the string that is not \p C, or npos if not
+    /// found.
+    LLVM_NODISCARD
+    size_t find_last_not_of(char C, size_t From = npos) const;
+
+    /// Find the last character in the string that is not in \p Chars, or
+    /// npos if not found.
+    ///
+    /// Complexity: O(size() + Chars.size())
+    LLVM_NODISCARD
+    size_t find_last_not_of(StringRef Chars, size_t From = npos) const;
+
+    /// Return true if the given string is a substring of *this, and false
+    /// otherwise.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool contains(StringRef Other) const { return find(Other) != npos; }
+
+    /// Return true if the given character is contained in *this, and false
+    /// otherwise.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool contains(char C) const { return find_first_of(C) != npos; }
+
+    /// Return true if the given string is a substring of *this, ignoring
+    /// case, and false otherwise.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool contains_lower(StringRef Other) const {
+      return find_lower(Other) != npos;
+    }
+
+    /// Return true if the given character is contained in *this, ignoring
+    /// case, and false otherwise.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool contains_lower(char C) const { return find_lower(C) != npos; }
+
+    /// @}
+    /// @name Helpful Algorithms
+    /// @{
+
+    /// Return the number of occurrences of \p C in the string.
+    LLVM_NODISCARD
+    size_t count(char C) const {
+      size_t Count = 0;
+      for (size_t i = 0, e = Length; i != e; ++i)
+        if (Data[i] == C)
+          ++Count;
+      return Count;
+    }
+
+    /// Return the number of non-overlapped occurrences of \p Str in
+    /// the string.
+    size_t count(StringRef Str) const;
+
+    /// Parse the current string as an integer of the specified radix.  If
+    /// \p Radix is specified as zero, this does radix autosensing using
+    /// extended C rules: 0 is octal, 0x is hex, 0b is binary.
+    ///
+    /// If the string is invalid or if only a subset of the string is valid,
+    /// this returns true to signify the error.  The string is considered
+    /// erroneous if empty or if it overflows T.
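+    ///
+    /// For example (an illustrative sketch):
+    /// \code
+    ///   int Value;
+    ///   bool Err = StringRef("0x1F").getAsInteger(0, Value);
+    ///   // Err == false, Value == 31 (radix autosensed as hex)
+    ///   Err = StringRef("12junk").getAsInteger(10, Value);
+    ///   // Err == true: trailing non-digits invalidate the whole string
+    /// \endcode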
+    template <typename T>
+    typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
+    getAsInteger(unsigned Radix, T &Result) const {
+      long long LLVal;
+      if (getAsSignedInteger(*this, Radix, LLVal) ||
+            static_cast<T>(LLVal) != LLVal)
+        return true;
+      Result = LLVal;
+      return false;
+    }
+
+    template <typename T>
+    typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type
+    getAsInteger(unsigned Radix, T &Result) const {
+      unsigned long long ULLVal;
+      // The additional cast to unsigned long long is required to avoid the
+      // Visual C++ warning C4805: '!=' : unsafe mix of type 'bool' and type
+      // 'unsigned __int64' when instantiating getAsInteger with T = bool.
+      if (getAsUnsignedInteger(*this, Radix, ULLVal) ||
+          static_cast<unsigned long long>(static_cast<T>(ULLVal)) != ULLVal)
+        return true;
+      Result = ULLVal;
+      return false;
+    }
+
+    /// Parse the current string as an integer of the specified radix.  If
+    /// \p Radix is specified as zero, this does radix autosensing using
+    /// extended C rules: 0 is octal, 0x is hex, 0b is binary.
+    ///
+    /// If the string does not begin with a number of the specified radix,
+    /// this returns true to signify the error. The string is considered
+    /// erroneous if empty or if it overflows T.
+    /// The portion of the string representing the discovered numeric value
+    /// is removed from the beginning of the string.
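+    ///
+    /// For example (an illustrative sketch):
+    /// \code
+    ///   StringRef S("42abc");
+    ///   int Value;
+    ///   bool Err = S.consumeInteger(10, Value);
+    ///   // Err == false, Value == 42, and S now refers to "abc"
+    /// \endcode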
+    template <typename T>
+    typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
+    consumeInteger(unsigned Radix, T &Result) {
+      long long LLVal;
+      if (consumeSignedInteger(*this, Radix, LLVal) ||
+          static_cast<long long>(static_cast<T>(LLVal)) != LLVal)
+        return true;
+      Result = LLVal;
+      return false;
+    }
+
+    template <typename T>
+    typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type
+    consumeInteger(unsigned Radix, T &Result) {
+      unsigned long long ULLVal;
+      if (consumeUnsignedInteger(*this, Radix, ULLVal) ||
+          static_cast<unsigned long long>(static_cast<T>(ULLVal)) != ULLVal)
+        return true;
+      Result = ULLVal;
+      return false;
+    }
+
+    /// Parse the current string as an integer of the specified \p Radix, or of
+    /// an autosensed radix if the \p Radix given is 0.  The current value in
+    /// \p Result is discarded, and the storage is changed to be wide enough to
+    /// store the parsed integer.
+    ///
+    /// \returns true if the string does not solely consist of a valid
+    /// non-empty number in the appropriate base.
+    ///
+    /// APInt::fromString is superficially similar but assumes the
+    /// string is well-formed in the given radix.
+    bool getAsInteger(unsigned Radix, APInt &Result) const;
+
+    /// Parse the current string as an IEEE double-precision floating
+    /// point value.  The string must be a well-formed double.
+    ///
+    /// If \p AllowInexact is false, the function will fail if the string
+    /// cannot be represented exactly.  Otherwise, the function only fails
+    /// in case of an overflow or underflow.
+    bool getAsDouble(double &Result, bool AllowInexact = true) const;
+
+    /// @}
+    /// @name String Operations
+    /// @{
+
+    /// Convert the given ASCII string to lowercase.
+    LLVM_NODISCARD
+    std::string lower() const;
+
+    /// Convert the given ASCII string to uppercase.
+    LLVM_NODISCARD
+    std::string upper() const;
+
+    /// @}
+    /// @name Substring Operations
+    /// @{
+
+    /// Return a reference to the substring from [Start, Start + N).
+    ///
+    /// \param Start The index of the starting character in the substring; if
+    /// the index is npos or greater than the length of the string then the
+    /// empty substring will be returned.
+    ///
+    /// \param N The number of characters to include in the substring. If \p N
+    /// exceeds the number of characters remaining in the string, the string
+    /// suffix (starting with \p Start) will be returned.
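+    ///
+    /// For example (an illustrative sketch):
+    /// \code
+    ///   StringRef S("hello");
+    ///   StringRef Mid  = S.substr(1, 3); // "ell"
+    ///   StringRef Tail = S.substr(3);    // "lo"
+    /// \endcode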
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef substr(size_t Start, size_t N = npos) const {
+      Start = std::min(Start, Length);
+      return StringRef(Data + Start, std::min(N, Length - Start));
+    }
+
+    /// Return a StringRef equal to 'this' but with only the first \p N
+    /// elements remaining.  If \p N is greater than the length of the
+    /// string, the entire string is returned.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef take_front(size_t N = 1) const {
+      if (N >= size())
+        return *this;
+      return drop_back(size() - N);
+    }
+
+    /// Return a StringRef equal to 'this' but with only the last \p N
+    /// elements remaining.  If \p N is greater than the length of the
+    /// string, the entire string is returned.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef take_back(size_t N = 1) const {
+      if (N >= size())
+        return *this;
+      return drop_front(size() - N);
+    }
+
+    /// Return the longest prefix of 'this' such that every character
+    /// in the prefix satisfies the given predicate.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef take_while(function_ref<bool(char)> F) const {
+      return substr(0, find_if_not(F));
+    }
+
+    /// Return the longest prefix of 'this' such that no character in
+    /// the prefix satisfies the given predicate.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef take_until(function_ref<bool(char)> F) const {
+      return substr(0, find_if(F));
+    }
+
+    /// Return a StringRef equal to 'this' but with the first \p N elements
+    /// dropped.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef drop_front(size_t N = 1) const {
+      assert(size() >= N && "Dropping more elements than exist");
+      return substr(N);
+    }
+
+    /// Return a StringRef equal to 'this' but with the last \p N elements
+    /// dropped.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef drop_back(size_t N = 1) const {
+      assert(size() >= N && "Dropping more elements than exist");
+      return substr(0, size()-N);
+    }
+
+    /// Return a StringRef equal to 'this', but with all characters satisfying
+    /// the given predicate dropped from the beginning of the string.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef drop_while(function_ref<bool(char)> F) const {
+      return substr(find_if_not(F));
+    }
+
+    /// Return a StringRef equal to 'this', but with all characters not
+    /// satisfying the given predicate dropped from the beginning of the string.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef drop_until(function_ref<bool(char)> F) const {
+      return substr(find_if(F));
+    }
+
+    /// Returns true if this StringRef has the given prefix and removes that
+    /// prefix.
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool consume_front(StringRef Prefix) {
+      if (!startswith(Prefix))
+        return false;
+
+      *this = drop_front(Prefix.size());
+      return true;
+    }
+
+    /// Returns true if this StringRef has the given suffix and removes that
+    /// suffix.
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    bool consume_back(StringRef Suffix) {
+      if (!endswith(Suffix))
+        return false;
+
+      *this = drop_back(Suffix.size());
+      return true;
+    }
+
+    /// Return a reference to the substring from [Start, End).
+    ///
+    /// \param Start The index of the starting character in the substring; if
+    /// the index is npos or greater than the length of the string then the
+    /// empty substring will be returned.
+    ///
+    /// \param End The index following the last character to include in the
+    /// substring. If this is npos or exceeds the number of characters
+    /// remaining in the string, the string suffix (starting with \p Start)
+    /// will be returned. If this is less than \p Start, an empty string will
+    /// be returned.
+    LLVM_NODISCARD
+    LLVM_ATTRIBUTE_ALWAYS_INLINE
+    StringRef slice(size_t Start, size_t End) const {
+      Start = std::min(Start, Length);
+      End = std::min(std::max(Start, End), Length);
+      return StringRef(Data + Start, End - Start);
+    }
+
+    /// Split into two substrings around the first occurrence of a separator
+    /// character.
+    ///
+    /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
+    /// such that (*this == LHS + Separator + RHS) is true and RHS is
+    /// maximal. If \p Separator is not in the string, then the result is a
+    /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
+    ///
+    /// \param Separator The character to split on.
+    /// \returns The split substrings.
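+    ///
+    /// For example (an illustrative sketch):
+    /// \code
+    ///   auto P = StringRef("a,b,c").split(',');
+    ///   // P.first == "a", P.second == "b,c"
+    /// \endcode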
+    LLVM_NODISCARD
+    std::pair<StringRef, StringRef> split(char Separator) const {
+      size_t Idx = find(Separator);
+      if (Idx == npos)
+        return std::make_pair(*this, StringRef());
+      return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
+    }
+
+    /// Split into two substrings around the first occurrence of a separator
+    /// string.
+    ///
+    /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
+    /// such that (*this == LHS + Separator + RHS) is true and RHS is
+    /// maximal. If \p Separator is not in the string, then the result is a
+    /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
+    ///
+    /// \param Separator - The string to split on.
+    /// \return - The split substrings.
+    LLVM_NODISCARD
+    std::pair<StringRef, StringRef> split(StringRef Separator) const {
+      size_t Idx = find(Separator);
+      if (Idx == npos)
+        return std::make_pair(*this, StringRef());
+      return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos));
+    }
+
+    /// Split into substrings around the occurrences of a separator string.
+    ///
+    /// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
+    /// \p MaxSplit splits are done and consequently <= \p MaxSplit + 1
+    /// elements are added to A.
+    /// If \p KeepEmpty is false, empty strings are not added to \p A. They
+    /// still count when considering \p MaxSplit.
+    /// A useful invariant is that
+    /// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true.
+    ///
+    /// \param A - Where to put the substrings.
+    /// \param Separator - The string to split on.
+    /// \param MaxSplit - The maximum number of times the string is split.
+    /// \param KeepEmpty - True if empty substring should be added.
+    void split(SmallVectorImpl<StringRef> &A,
+               StringRef Separator, int MaxSplit = -1,
+               bool KeepEmpty = true) const;
+
+    /// Split into substrings around the occurrences of a separator character.
+    ///
+    /// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
+    /// \p MaxSplit splits are done and consequently <= \p MaxSplit + 1
+    /// elements are added to A.
+    /// If \p KeepEmpty is false, empty strings are not added to \p A. They
+    /// still count when considering \p MaxSplit.
+    /// A useful invariant is that
+    /// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true.
+    ///
+    /// \param A - Where to put the substrings.
+    /// \param Separator - The string to split on.
+    /// \param MaxSplit - The maximum number of times the string is split.
+    /// \param KeepEmpty - True if empty substring should be added.
+    void split(SmallVectorImpl<StringRef> &A, char Separator, int MaxSplit = -1,
+               bool KeepEmpty = true) const;
+
+    /// Split into two substrings around the last occurrence of a separator
+    /// character.
+    ///
+    /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
+    /// such that (*this == LHS + Separator + RHS) is true and RHS is
+    /// minimal. If \p Separator is not in the string, then the result is a
+    /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
+    ///
+    /// \param Separator - The character to split on.
+    /// \return - The split substrings.
+    LLVM_NODISCARD
+    std::pair<StringRef, StringRef> rsplit(char Separator) const {
+      size_t Idx = rfind(Separator);
+      if (Idx == npos)
+        return std::make_pair(*this, StringRef());
+      return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
+    }
+
+    /// Return string with consecutive \p Char characters starting from the
+    /// left removed.
+    LLVM_NODISCARD
+    StringRef ltrim(char Char) const {
+      return drop_front(std::min(Length, find_first_not_of(Char)));
+    }
+
+    /// Return string with consecutive characters in \p Chars starting from
+    /// the left removed.
+    LLVM_NODISCARD
+    StringRef ltrim(StringRef Chars = " \t\n\v\f\r") const {
+      return drop_front(std::min(Length, find_first_not_of(Chars)));
+    }
+
+    /// Return string with consecutive \p Char characters starting from the
+    /// right removed.
+    LLVM_NODISCARD
+    StringRef rtrim(char Char) const {
+      return drop_back(Length - std::min(Length, find_last_not_of(Char) + 1));
+    }
+
+    /// Return string with consecutive characters in \p Chars starting from
+    /// the right removed.
+    LLVM_NODISCARD
+    StringRef rtrim(StringRef Chars = " \t\n\v\f\r") const {
+      return drop_back(Length - std::min(Length, find_last_not_of(Chars) + 1));
+    }
+
+    /// Return string with consecutive \p Char characters starting from the
+    /// left and right removed.
+    LLVM_NODISCARD
+    StringRef trim(char Char) const {
+      return ltrim(Char).rtrim(Char);
+    }
+
+    /// Return string with consecutive characters in \p Chars starting from
+    /// the left and right removed.
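+    ///
+    /// For example (an illustrative sketch):
+    /// \code
+    ///   StringRef T = StringRef(" \thello\n ").trim(); // "hello"
+    /// \endcode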
+    LLVM_NODISCARD
+    StringRef trim(StringRef Chars = " \t\n\v\f\r") const {
+      return ltrim(Chars).rtrim(Chars);
+    }
+
+    /// @}
+  };
+
+  /// A wrapper around a string literal that serves as a proxy for constructing
+  /// global tables of StringRefs with the length computed at compile time.
+  /// In order to avoid the invocation of a global constructor, StringLiteral
+  /// should *only* be used in a constexpr context, as such:
+  ///
+  /// constexpr StringLiteral S("test");
+  ///
+  class StringLiteral : public StringRef {
+  private:
+    constexpr StringLiteral(const char *Str, size_t N) : StringRef(Str, N) {
+    }
+
+  public:
+    template <size_t N>
+    constexpr StringLiteral(const char (&Str)[N])
+#if defined(__clang__) && __has_attribute(enable_if)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wgcc-compat"
+        __attribute((enable_if(__builtin_strlen(Str) == N - 1,
+                               "invalid string literal")))
+#pragma clang diagnostic pop
+#endif
+        : StringRef(Str, N - 1) {
+    }
+
+    // Explicit construction for strings like "foo\0bar".
+    template <size_t N>
+    static constexpr StringLiteral withInnerNUL(const char (&Str)[N]) {
+      return StringLiteral(Str, N - 1);
+    }
+  };
+
+  /// @name StringRef Comparison Operators
+  /// @{
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  inline bool operator==(StringRef LHS, StringRef RHS) {
+    return LHS.equals(RHS);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  inline bool operator!=(StringRef LHS, StringRef RHS) { return !(LHS == RHS); }
+
+  inline bool operator<(StringRef LHS, StringRef RHS) {
+    return LHS.compare(RHS) == -1;
+  }
+
+  inline bool operator<=(StringRef LHS, StringRef RHS) {
+    return LHS.compare(RHS) != 1;
+  }
+
+  inline bool operator>(StringRef LHS, StringRef RHS) {
+    return LHS.compare(RHS) == 1;
+  }
+
+  inline bool operator>=(StringRef LHS, StringRef RHS) {
+    return LHS.compare(RHS) != -1;
+  }
+
+  inline std::string &operator+=(std::string &buffer, StringRef string) {
+    return buffer.append(string.data(), string.size());
+  }
+
+  /// @}
+
+  /// \brief Compute a hash_code for a StringRef.
+  LLVM_NODISCARD
+  hash_code hash_value(StringRef S);
+
+  // StringRefs can be treated like a POD type.
+  template <typename T> struct isPodLike;
+  template <> struct isPodLike<StringRef> { static const bool value = true; };
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGREF_H
diff --git a/linux-x64/clang/include/llvm/ADT/StringSet.h b/linux-x64/clang/include/llvm/ADT/StringSet.h
new file mode 100644
index 0000000..9af44c0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/StringSet.h
@@ -0,0 +1,52 @@
+//===- StringSet.h - The LLVM Compiler Driver -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open
+// Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  StringSet - A set-like wrapper for the StringMap.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGSET_H
+#define LLVM_ADT_STRINGSET_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <initializer_list>
+#include <utility>
+
+namespace llvm {
+
+  /// StringSet - A wrapper for StringMap that provides set-like functionality.
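+  ///
+  /// A brief illustrative sketch (names here are hypothetical):
+  /// \code
+  ///   StringSet<> Set;
+  ///   Set.insert("apple");
+  ///   if (Set.count("apple")) { /* present */ }
+  /// \endcode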
+  template <class AllocatorTy = MallocAllocator>
+  class StringSet : public StringMap<char, AllocatorTy> {
+    using base = StringMap<char, AllocatorTy>;
+
+  public:
+    StringSet() = default;
+    StringSet(std::initializer_list<StringRef> S) {
+      for (StringRef X : S)
+        insert(X);
+    }
+
+    std::pair<typename base::iterator, bool> insert(StringRef Key) {
+      assert(!Key.empty());
+      return base::insert(std::make_pair(Key, '\0'));
+    }
+
+    template <typename InputIt>
+    void insert(const InputIt &Begin, const InputIt &End) {
+      for (auto It = Begin; It != End; ++It)
+        base::insert(std::make_pair(*It, '\0'));
+    }
+  };
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGSET_H
diff --git a/linux-x64/clang/include/llvm/ADT/StringSwitch.h b/linux-x64/clang/include/llvm/ADT/StringSwitch.h
new file mode 100644
index 0000000..9e07303
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/StringSwitch.h
@@ -0,0 +1,219 @@
+//===--- StringSwitch.h - Switch-on-literal-string Construct --------------===/
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===/
+//
+//  This file implements the StringSwitch template, which mimics a switch()
+//  statement whose cases are string literals.
+//
+//===----------------------------------------------------------------------===/
+#ifndef LLVM_ADT_STRINGSWITCH_H
+#define LLVM_ADT_STRINGSWITCH_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <cstring>
+
+namespace llvm {
+
+/// \brief A switch()-like statement whose cases are string literals.
+///
+/// The StringSwitch class is a simple form of a switch() statement that
+/// determines whether the given string matches one of the given string
+/// literals. The template type parameter \p T is the type of the value that
+/// will be returned from the string-switch expression. For example,
+/// the following code switches on the name of a color in \c argv[i]:
+///
+/// \code
+/// Color color = StringSwitch<Color>(argv[i])
+///   .Case("red", Red)
+///   .Case("orange", Orange)
+///   .Case("yellow", Yellow)
+///   .Case("green", Green)
+///   .Case("blue", Blue)
+///   .Case("indigo", Indigo)
+///   .Cases("violet", "purple", Violet)
+///   .Default(UnknownColor);
+/// \endcode
+template<typename T, typename R = T>
+class StringSwitch {
+  /// \brief The string we are matching.
+  const StringRef Str;
+
+  /// \brief The result of this switch statement, once known; None until a
+  /// case matches.
+  Optional<T> Result;
+
+public:
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  explicit StringSwitch(StringRef S)
+  : Str(S), Result() { }
+
+  // StringSwitch is not copyable.
+  StringSwitch(const StringSwitch &) = delete;
+
+  // StringSwitch is not assignable due to 'Str' being 'const'.
+  void operator=(const StringSwitch &) = delete;
+  void operator=(StringSwitch &&other) = delete;
+
+  StringSwitch(StringSwitch &&other)
+    : Str(other.Str), Result(std::move(other.Result)) { }
+
+  ~StringSwitch() = default;
+
+  // Case-sensitive case matchers
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Case(StringLiteral S, T Value) {
+    if (!Result && Str == S) {
+      Result = std::move(Value);
+    }
+    return *this;
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch& EndsWith(StringLiteral S, T Value) {
+    if (!Result && Str.endswith(S)) {
+      Result = std::move(Value);
+    }
+    return *this;
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch& StartsWith(StringLiteral S, T Value) {
+    if (!Result && Str.startswith(S)) {
+      Result = std::move(Value);
+    }
+    return *this;
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, T Value) {
+    return Case(S0, Value).Case(S1, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      T Value) {
+    return Case(S0, Value).Cases(S1, S2, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, StringLiteral S4, T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, S4, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
+                      T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
+                      StringLiteral S6, T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
+                      StringLiteral S6, StringLiteral S7, T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
+                      StringLiteral S6, StringLiteral S7, StringLiteral S8,
+                      T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, S8, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
+                      StringLiteral S6, StringLiteral S7, StringLiteral S8,
+                      StringLiteral S9, T Value) {
+    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, S8, S9, Value);
+  }
+
+  // Case-insensitive case matchers.
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &CaseLower(StringLiteral S, T Value) {
+    if (!Result && Str.equals_lower(S))
+      Result = std::move(Value);
+
+    return *this;
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &EndsWithLower(StringLiteral S, T Value) {
+    if (!Result && Str.endswith_lower(S))
+      Result = Value;
+
+    return *this;
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &StartsWithLower(StringLiteral S, T Value) {
+    if (!Result && Str.startswith_lower(S))
+      Result = std::move(Value);
+
+    return *this;
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, T Value) {
+    return CaseLower(S0, Value).CaseLower(S1, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                           T Value) {
+    return CaseLower(S0, Value).CasesLower(S1, S2, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                           StringLiteral S3, T Value) {
+    return CaseLower(S0, Value).CasesLower(S1, S2, S3, Value);
+  }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+                           StringLiteral S3, StringLiteral S4, T Value) {
+    return CaseLower(S0, Value).CasesLower(S1, S2, S3, S4, Value);
+  }
+
+  LLVM_NODISCARD
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  R Default(T Value) {
+    if (Result)
+      return std::move(*Result);
+    return Value;
+  }
+
+  LLVM_NODISCARD
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  operator R() {
+    assert(Result && "Fell off the end of a string-switch");
+    return std::move(*Result);
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGSWITCH_H
diff --git a/linux-x64/clang/include/llvm/ADT/TinyPtrVector.h b/linux-x64/clang/include/llvm/ADT/TinyPtrVector.h
new file mode 100644
index 0000000..73573d6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/TinyPtrVector.h
@@ -0,0 +1,347 @@
+//===- llvm/ADT/TinyPtrVector.h - 'Normally tiny' vectors -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_TINYPTRVECTOR_H
+#define LLVM_ADT_TINYPTRVECTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+
+namespace llvm {
+
+/// TinyPtrVector - This class is specialized for cases where there are
+/// normally 0 or 1 element in a vector, but is general enough to go beyond that
+/// when required.
+///
+/// NOTE: This container doesn't allow you to store a null pointer into it.
+///
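+/// A brief illustrative sketch (names here are hypothetical):
+/// \code
+///   int A = 1, B = 2;
+///   TinyPtrVector<int *> V;
+///   V.push_back(&A); // one element is stored inline in the pointer union
+///   V.push_back(&B); // a second element spills to a heap-allocated vector
+/// \endcode
+///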
+template <typename EltTy>
+class TinyPtrVector {
+public:
+  using VecTy = SmallVector<EltTy, 4>;
+  using value_type = typename VecTy::value_type;
+  using PtrUnion = PointerUnion<EltTy, VecTy *>;
+
+private:
+  PtrUnion Val;
+
+public:
+  TinyPtrVector() = default;
+
+  ~TinyPtrVector() {
+    if (VecTy *V = Val.template dyn_cast<VecTy*>())
+      delete V;
+  }
+
+  TinyPtrVector(const TinyPtrVector &RHS) : Val(RHS.Val) {
+    if (VecTy *V = Val.template dyn_cast<VecTy*>())
+      Val = new VecTy(*V);
+  }
+
+  TinyPtrVector &operator=(const TinyPtrVector &RHS) {
+    if (this == &RHS)
+      return *this;
+    if (RHS.empty()) {
+      this->clear();
+      return *this;
+    }
+
+    // Try to squeeze into the single slot. If it won't fit, allocate a copied
+    // vector.
+    if (Val.template is<EltTy>()) {
+      if (RHS.size() == 1)
+        Val = RHS.front();
+      else
+        Val = new VecTy(*RHS.Val.template get<VecTy*>());
+      return *this;
+    }
+
+    // If we have a full vector allocated, try to re-use it.
+    if (RHS.Val.template is<EltTy>()) {
+      Val.template get<VecTy*>()->clear();
+      Val.template get<VecTy*>()->push_back(RHS.front());
+    } else {
+      *Val.template get<VecTy*>() = *RHS.Val.template get<VecTy*>();
+    }
+    return *this;
+  }
+
+  TinyPtrVector(TinyPtrVector &&RHS) : Val(RHS.Val) {
+    RHS.Val = (EltTy)nullptr;
+  }
+
+  TinyPtrVector &operator=(TinyPtrVector &&RHS) {
+    if (this == &RHS)
+      return *this;
+    if (RHS.empty()) {
+      this->clear();
+      return *this;
+    }
+
+    // If this vector has been allocated on the heap, re-use it if cheap. If it
+    // would require more copying, just delete it and we'll steal the other
+    // side.
+    if (VecTy *V = Val.template dyn_cast<VecTy*>()) {
+      if (RHS.Val.template is<EltTy>()) {
+        V->clear();
+        V->push_back(RHS.front());
+        RHS.Val = (EltTy)nullptr;
+        return *this;
+      }
+      delete V;
+    }
+
+    Val = RHS.Val;
+    RHS.Val = (EltTy)nullptr;
+    return *this;
+  }
+
+  /// Constructor from an ArrayRef.
+  ///
+  /// This also is a constructor for individual array elements due to the single
+  /// element constructor for ArrayRef.
+  explicit TinyPtrVector(ArrayRef<EltTy> Elts)
+      : Val(Elts.empty()
+                ? PtrUnion()
+                : Elts.size() == 1
+                      ? PtrUnion(Elts[0])
+                      : PtrUnion(new VecTy(Elts.begin(), Elts.end()))) {}
+
+  TinyPtrVector(size_t Count, EltTy Value)
+      : Val(Count == 0 ? PtrUnion()
+                       : Count == 1 ? PtrUnion(Value)
+                                    : PtrUnion(new VecTy(Count, Value))) {}
+
+  // implicit conversion operator to ArrayRef.
+  operator ArrayRef<EltTy>() const {
+    if (Val.isNull())
+      return None;
+    if (Val.template is<EltTy>())
+      return *Val.getAddrOfPtr1();
+    return *Val.template get<VecTy*>();
+  }
+
+  // implicit conversion operator to MutableArrayRef.
+  operator MutableArrayRef<EltTy>() {
+    if (Val.isNull())
+      return None;
+    if (Val.template is<EltTy>())
+      return *Val.getAddrOfPtr1();
+    return *Val.template get<VecTy*>();
+  }
+
+  // Implicit conversion to ArrayRef<U> if EltTy* implicitly converts to U*.
+  template<typename U,
+           typename std::enable_if<
+               std::is_convertible<ArrayRef<EltTy>, ArrayRef<U>>::value,
+               bool>::type = false>
+  operator ArrayRef<U>() const {
+    return operator ArrayRef<EltTy>();
+  }
+
+  bool empty() const {
+    // This vector can be empty if it contains no element, or if it
+    // contains a pointer to an empty vector.
+    if (Val.isNull()) return true;
+    if (VecTy *Vec = Val.template dyn_cast<VecTy*>())
+      return Vec->empty();
+    return false;
+  }
+
+  unsigned size() const {
+    if (empty())
+      return 0;
+    if (Val.template is<EltTy>())
+      return 1;
+    return Val.template get<VecTy*>()->size();
+  }
+
+  using iterator = EltTy *;
+  using const_iterator = const EltTy *;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+  iterator begin() {
+    if (Val.template is<EltTy>())
+      return Val.getAddrOfPtr1();
+
+    return Val.template get<VecTy *>()->begin();
+  }
+
+  iterator end() {
+    if (Val.template is<EltTy>())
+      return begin() + (Val.isNull() ? 0 : 1);
+
+    return Val.template get<VecTy *>()->end();
+  }
+
+  const_iterator begin() const {
+    return (const_iterator)const_cast<TinyPtrVector*>(this)->begin();
+  }
+
+  const_iterator end() const {
+    return (const_iterator)const_cast<TinyPtrVector*>(this)->end();
+  }
+
+  reverse_iterator rbegin() { return reverse_iterator(end()); }
+  reverse_iterator rend() { return reverse_iterator(begin()); }
+
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(end());
+  }
+
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(begin());
+  }
+
+  EltTy operator[](unsigned i) const {
+    assert(!Val.isNull() && "can't index into an empty vector");
+    if (EltTy V = Val.template dyn_cast<EltTy>()) {
+      assert(i == 0 && "tinyvector index out of range");
+      return V;
+    }
+
+    assert(i < Val.template get<VecTy*>()->size() &&
+           "tinyvector index out of range");
+    return (*Val.template get<VecTy*>())[i];
+  }
+
+  EltTy front() const {
+    assert(!empty() && "vector empty");
+    if (EltTy V = Val.template dyn_cast<EltTy>())
+      return V;
+    return Val.template get<VecTy*>()->front();
+  }
+
+  EltTy back() const {
+    assert(!empty() && "vector empty");
+    if (EltTy V = Val.template dyn_cast<EltTy>())
+      return V;
+    return Val.template get<VecTy*>()->back();
+  }
+
+  void push_back(EltTy NewVal) {
+    assert(NewVal && "Can't add a null value");
+
+    // If we have nothing, add something.
+    if (Val.isNull()) {
+      Val = NewVal;
+      return;
+    }
+
+    // If we have a single value, convert to a vector.
+    if (EltTy V = Val.template dyn_cast<EltTy>()) {
+      Val = new VecTy();
+      Val.template get<VecTy*>()->push_back(V);
+    }
+
+    // Add the new value, we know we have a vector.
+    Val.template get<VecTy*>()->push_back(NewVal);
+  }
+
+  void pop_back() {
+    // If we have a single value, convert to empty.
+    if (Val.template is<EltTy>())
+      Val = (EltTy)nullptr;
+    else if (VecTy *Vec = Val.template get<VecTy*>())
+      Vec->pop_back();
+  }
+
+  void clear() {
+    // If we have a single value, convert to empty.
+    if (Val.template is<EltTy>()) {
+      Val = (EltTy)nullptr;
+    } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+      // If we have a vector form, just clear it.
+      Vec->clear();
+    }
+    // Otherwise, we're already empty.
+  }
+
+  iterator erase(iterator I) {
+    assert(I >= begin() && "Iterator to erase is out of bounds.");
+    assert(I < end() && "Erasing at past-the-end iterator.");
+
+    // If we have a single value, convert to empty.
+    if (Val.template is<EltTy>()) {
+      if (I == begin())
+        Val = (EltTy)nullptr;
+    } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+      // multiple items in a vector; just do the erase, there is no
+      // benefit to collapsing back to a pointer
+      return Vec->erase(I);
+    }
+    return end();
+  }
+
+  iterator erase(iterator S, iterator E) {
+    assert(S >= begin() && "Range to erase is out of bounds.");
+    assert(S <= E && "Trying to erase invalid range.");
+    assert(E <= end() && "Trying to erase past the end.");
+
+    if (Val.template is<EltTy>()) {
+      if (S == begin() && S != E)
+        Val = (EltTy)nullptr;
+    } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+      return Vec->erase(S, E);
+    }
+    return end();
+  }
+
+  iterator insert(iterator I, const EltTy &Elt) {
+    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+    assert(I <= this->end() && "Inserting past the end of the vector.");
+    if (I == end()) {
+      push_back(Elt);
+      return std::prev(end());
+    }
+    assert(!Val.isNull() && "Null value with non-end insert iterator.");
+    if (EltTy V = Val.template dyn_cast<EltTy>()) {
+      assert(I == begin());
+      Val = Elt;
+      push_back(V);
+      return begin();
+    }
+
+    return Val.template get<VecTy*>()->insert(I, Elt);
+  }
+
+  template<typename ItTy>
+  iterator insert(iterator I, ItTy From, ItTy To) {
+    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+    assert(I <= this->end() && "Inserting past the end of the vector.");
+    if (From == To)
+      return I;
+
+    // If we have a single value, convert to a vector.
+    ptrdiff_t Offset = I - begin();
+    if (Val.isNull()) {
+      if (std::next(From) == To) {
+        Val = *From;
+        return begin();
+      }
+
+      Val = new VecTy();
+    } else if (EltTy V = Val.template dyn_cast<EltTy>()) {
+      Val = new VecTy();
+      Val.template get<VecTy*>()->push_back(V);
+    }
+    return Val.template get<VecTy*>()->insert(begin() + Offset, From, To);
+  }
+};
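+
+// A minimal usage sketch for the class above (illustrative only, not part
+// of the original header): the first push_back stores the pointer inline;
+// the second switches to the heap-allocated vector representation.
+//
+//   TinyPtrVector<int *> V;
+//   int A = 1, B = 2;
+//   V.push_back(&A);                     // inline single-pointer form
+//   V.push_back(&B);                     // converts to vector form
+//   assert(V.size() == 2 && V.front() == &A && V.back() == &B);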
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_TINYPTRVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/Triple.h b/linux-x64/clang/include/llvm/ADT/Triple.h
new file mode 100644
index 0000000..8ba50d9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Triple.h
@@ -0,0 +1,809 @@
+//===-- llvm/ADT/Triple.h - Target triple helper class ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_TRIPLE_H
+#define LLVM_ADT_TRIPLE_H
+
+#include "llvm/ADT/Twine.h"
+
+// Some system headers or GCC predefined macros conflict with identifiers in
+// this file.  Undefine them here.
+#undef NetBSD
+#undef mips
+#undef sparc
+
+namespace llvm {
+
+/// Triple - Helper class for working with autoconf configuration names. For
+/// historical reasons, we also call these 'triples' (they used to contain
+/// exactly three fields).
+///
+/// Configuration names are strings in the canonical form:
+///   ARCHITECTURE-VENDOR-OPERATING_SYSTEM
+/// or
+///   ARCHITECTURE-VENDOR-OPERATING_SYSTEM-ENVIRONMENT
+///
+/// This class is used for clients which want to support arbitrary
+/// configuration names, but also want to implement certain special
+/// behavior for particular configurations. This class isolates the mapping
+/// from the components of the configuration name to well known IDs.
+///
+/// At its core the Triple class is designed to be a wrapper for a triple
+/// string; the constructor does not change or normalize the triple string.
+/// Clients that need to handle the non-canonical triples that users often
+/// specify should use the normalize method.
+///
+/// See autoconf/config.guess for a glimpse into what configuration names
+/// look like in practice.
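+///
+/// A minimal usage sketch (illustrative only; the triple string below is an
+/// arbitrary example, not mandated by this header):
+/// \code
+///   Triple T("aarch64-unknown-linux-gnu");
+///   if (T.isAArch64() && T.isOSLinux() && !T.isAndroid())
+///     ; // glibc-style Linux/AArch64 handling goes here
+/// \endcode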
+class Triple {
+public:
+  enum ArchType {
+    UnknownArch,
+
+    arm,            // ARM (little endian): arm, armv.*, xscale
+    armeb,          // ARM (big endian): armeb
+    aarch64,        // AArch64 (little endian): aarch64
+    aarch64_be,     // AArch64 (big endian): aarch64_be
+    arc,            // ARC: Synopsys ARC
+    avr,            // AVR: Atmel AVR microcontroller
+    bpfel,          // eBPF or extended BPF or 64-bit BPF (little endian)
+    bpfeb,          // eBPF or extended BPF or 64-bit BPF (big endian)
+    hexagon,        // Hexagon: hexagon
+    mips,           // MIPS: mips, mipsallegrex
+    mipsel,         // MIPSEL: mipsel, mipsallegrexel
+    mips64,         // MIPS64: mips64
+    mips64el,       // MIPS64EL: mips64el
+    msp430,         // MSP430: msp430
+    nios2,          // NIOSII: nios2
+    ppc,            // PPC: powerpc
+    ppc64,          // PPC64: powerpc64, ppu
+    ppc64le,        // PPC64LE: powerpc64le
+    r600,           // R600: AMD GPUs HD2XXX - HD6XXX
+    amdgcn,         // AMDGCN: AMD GCN GPUs
+    riscv32,        // RISC-V (32-bit): riscv32
+    riscv64,        // RISC-V (64-bit): riscv64
+    sparc,          // Sparc: sparc
+    sparcv9,        // Sparcv9: Sparcv9
+    sparcel,        // Sparc: (endianness = little). NB: 'Sparcle' is a CPU variant
+    systemz,        // SystemZ: s390x
+    tce,            // TCE (http://tce.cs.tut.fi/): tce
+    tcele,          // TCE little endian (http://tce.cs.tut.fi/): tcele
+    thumb,          // Thumb (little endian): thumb, thumbv.*
+    thumbeb,        // Thumb (big endian): thumbeb
+    x86,            // X86: i[3-9]86
+    x86_64,         // X86-64: amd64, x86_64
+    xcore,          // XCore: xcore
+    nvptx,          // NVPTX: 32-bit
+    nvptx64,        // NVPTX: 64-bit
+    le32,           // le32: generic little-endian 32-bit CPU (PNaCl)
+    le64,           // le64: generic little-endian 64-bit CPU (PNaCl)
+    amdil,          // AMDIL
+    amdil64,        // AMDIL with 64-bit pointers
+    hsail,          // AMD HSAIL
+    hsail64,        // AMD HSAIL with 64-bit pointers
+    spir,           // SPIR: standard portable IR for OpenCL 32-bit version
+    spir64,         // SPIR: standard portable IR for OpenCL 64-bit version
+    kalimba,        // Kalimba: generic kalimba
+    shave,          // SHAVE: Movidius vector VLIW processors
+    lanai,          // Lanai: Lanai 32-bit
+    wasm32,         // WebAssembly with 32-bit pointers
+    wasm64,         // WebAssembly with 64-bit pointers
+    renderscript32, // 32-bit RenderScript
+    renderscript64, // 64-bit RenderScript
+    LastArchType = renderscript64
+  };
+  enum SubArchType {
+    NoSubArch,
+
+    ARMSubArch_v8_3a,
+    ARMSubArch_v8_2a,
+    ARMSubArch_v8_1a,
+    ARMSubArch_v8,
+    ARMSubArch_v8r,
+    ARMSubArch_v8m_baseline,
+    ARMSubArch_v8m_mainline,
+    ARMSubArch_v7,
+    ARMSubArch_v7em,
+    ARMSubArch_v7m,
+    ARMSubArch_v7s,
+    ARMSubArch_v7k,
+    ARMSubArch_v7ve,
+    ARMSubArch_v6,
+    ARMSubArch_v6m,
+    ARMSubArch_v6k,
+    ARMSubArch_v6t2,
+    ARMSubArch_v5,
+    ARMSubArch_v5te,
+    ARMSubArch_v4t,
+
+    KalimbaSubArch_v3,
+    KalimbaSubArch_v4,
+    KalimbaSubArch_v5
+  };
+  enum VendorType {
+    UnknownVendor,
+
+    Apple,
+    PC,
+    SCEI,
+    BGP,
+    BGQ,
+    Freescale,
+    IBM,
+    ImaginationTechnologies,
+    MipsTechnologies,
+    NVIDIA,
+    CSR,
+    Myriad,
+    AMD,
+    Mesa,
+    SUSE,
+    LastVendorType = SUSE
+  };
+  enum OSType {
+    UnknownOS,
+
+    Ananas,
+    CloudABI,
+    Darwin,
+    DragonFly,
+    FreeBSD,
+    Fuchsia,
+    IOS,
+    KFreeBSD,
+    Linux,
+    Lv2,        // PS3
+    MacOSX,
+    NetBSD,
+    OpenBSD,
+    Solaris,
+    Win32,
+    Haiku,
+    Minix,
+    RTEMS,
+    NaCl,       // Native Client
+    CNK,        // BG/P Compute-Node Kernel
+    AIX,
+    CUDA,       // NVIDIA CUDA
+    NVCL,       // NVIDIA OpenCL
+    AMDHSA,     // AMD HSA Runtime
+    PS4,
+    ELFIAMCU,
+    TvOS,       // Apple tvOS
+    WatchOS,    // Apple watchOS
+    Mesa3D,
+    Contiki,
+    AMDPAL,     // AMD PAL Runtime
+    LastOSType = AMDPAL
+  };
+  enum EnvironmentType {
+    UnknownEnvironment,
+
+    GNU,
+    GNUABIN32,
+    GNUABI64,
+    GNUEABI,
+    GNUEABIHF,
+    GNUX32,
+    CODE16,
+    EABI,
+    EABIHF,
+    Android,
+    Musl,
+    MuslEABI,
+    MuslEABIHF,
+
+    MSVC,
+    Itanium,
+    Cygnus,
+    CoreCLR,
+    Simulator,  // Simulator variants of other systems, e.g., Apple's iOS
+    LastEnvironmentType = Simulator
+  };
+  enum ObjectFormatType {
+    UnknownObjectFormat,
+
+    COFF,
+    ELF,
+    MachO,
+    Wasm,
+  };
+
+private:
+  std::string Data;
+
+  /// The parsed arch type.
+  ArchType Arch;
+
+  /// The parsed subarchitecture type.
+  SubArchType SubArch;
+
+  /// The parsed vendor type.
+  VendorType Vendor;
+
+  /// The parsed OS type.
+  OSType OS;
+
+  /// The parsed Environment type.
+  EnvironmentType Environment;
+
+  /// The object format type.
+  ObjectFormatType ObjectFormat;
+
+public:
+  /// @name Constructors
+  /// @{
+
+  /// Default constructor is the same as an empty string and leaves all
+  /// triple fields unknown.
+  Triple()
+      : Data(), Arch(), SubArch(), Vendor(), OS(), Environment(),
+        ObjectFormat() {}
+
+  explicit Triple(const Twine &Str);
+  Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr);
+  Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr,
+         const Twine &EnvironmentStr);
+
+  bool operator==(const Triple &Other) const {
+    return Arch == Other.Arch && SubArch == Other.SubArch &&
+           Vendor == Other.Vendor && OS == Other.OS &&
+           Environment == Other.Environment &&
+           ObjectFormat == Other.ObjectFormat;
+  }
+
+  bool operator!=(const Triple &Other) const {
+    return !(*this == Other);
+  }
+
+  /// @}
+  /// @name Normalization
+  /// @{
+
+  /// normalize - Turn an arbitrary machine specification into the canonical
+  /// triple form (or something sensible that the Triple class understands if
+  /// nothing better can reasonably be done).  In particular, it handles the
+  /// common case in which otherwise valid components are in the wrong order.
+  static std::string normalize(StringRef Str);
+
+  /// Return the normalized form of this triple's string.
+  std::string normalize() const { return normalize(Data); }
+
+  /// @}
+  /// @name Typed Component Access
+  /// @{
+
+  /// getArch - Get the parsed architecture type of this triple.
+  ArchType getArch() const { return Arch; }
+
+  /// getSubArch - Get the parsed subarchitecture type for this triple.
+  SubArchType getSubArch() const { return SubArch; }
+
+  /// getVendor - Get the parsed vendor type of this triple.
+  VendorType getVendor() const { return Vendor; }
+
+  /// getOS - Get the parsed operating system type of this triple.
+  OSType getOS() const { return OS; }
+
+  /// hasEnvironment - Does this triple have the optional environment
+  /// (fourth) component?
+  bool hasEnvironment() const {
+    return getEnvironmentName() != "";
+  }
+
+  /// getEnvironment - Get the parsed environment type of this triple.
+  EnvironmentType getEnvironment() const { return Environment; }
+
+  /// Parse the version number from the environment component of the
+  /// triple, if present.
+  ///
+  /// For example, the environment "android16" would return (16, 0, 0).
+  ///
+  /// If an entry is not defined, it will be returned as 0.
+  void getEnvironmentVersion(unsigned &Major, unsigned &Minor,
+                             unsigned &Micro) const;
+
+  /// getFormat - Get the object format for this triple.
+  ObjectFormatType getObjectFormat() const { return ObjectFormat; }
+
+  /// getOSVersion - Parse the version number from the OS name component of the
+  /// triple, if present.
+  ///
+  /// For example, "fooos1.2.3" would return (1, 2, 3).
+  ///
+  /// If an entry is not defined, it will be returned as 0.
+  void getOSVersion(unsigned &Major, unsigned &Minor, unsigned &Micro) const;
+
+  /// getOSMajorVersion - Return just the major version number, this is
+  /// specialized because it is a common query.
+  unsigned getOSMajorVersion() const {
+    unsigned Maj, Min, Micro;
+    getOSVersion(Maj, Min, Micro);
+    return Maj;
+  }
+
+  /// getMacOSXVersion - Parse the version number as with getOSVersion and then
+  /// translate generic "darwin" versions to the corresponding OS X versions.
+  /// This may also be called with IOS triples but the OS X version number is
+  /// just set to a constant 10.4.0 in that case.  Returns true if successful.
+  bool getMacOSXVersion(unsigned &Major, unsigned &Minor,
+                        unsigned &Micro) const;
+
+  /// getiOSVersion - Parse the version number as with getOSVersion.  This should
+  /// only be called with IOS or generic triples.
+  void getiOSVersion(unsigned &Major, unsigned &Minor,
+                     unsigned &Micro) const;
+
+  /// getWatchOSVersion - Parse the version number as with getOSVersion.  This
+  /// should only be called with WatchOS or generic triples.
+  void getWatchOSVersion(unsigned &Major, unsigned &Minor,
+                         unsigned &Micro) const;
+
+  /// @}
+  /// @name Direct Component Access
+  /// @{
+
+  const std::string &str() const { return Data; }
+
+  const std::string &getTriple() const { return Data; }
+
+  /// getArchName - Get the architecture (first) component of the
+  /// triple.
+  StringRef getArchName() const;
+
+  /// getVendorName - Get the vendor (second) component of the triple.
+  StringRef getVendorName() const;
+
+  /// getOSName - Get the operating system (third) component of the
+  /// triple.
+  StringRef getOSName() const;
+
+  /// getEnvironmentName - Get the optional environment (fourth)
+  /// component of the triple, or "" if empty.
+  StringRef getEnvironmentName() const;
+
+  /// getOSAndEnvironmentName - Get the operating system and optional
+  /// environment components as a single string (separated by a '-'
+  /// if the environment component is present).
+  StringRef getOSAndEnvironmentName() const;
+
+  /// @}
+  /// @name Convenience Predicates
+  /// @{
+
+  /// Test whether the architecture is 64-bit
+  ///
+  /// Note that this tests for 64-bit pointer width, and nothing else. Note
+  /// that we intentionally expose only three predicates, 64-bit, 32-bit, and
+  /// 16-bit. The inner details of pointer width for particular architectures
+  /// are not summed up in the triple, and so only a coarse-grained predicate
+  /// system is provided.
+  bool isArch64Bit() const;
+
+  /// Test whether the architecture is 32-bit
+  ///
+  /// Note that this tests for 32-bit pointer width, and nothing else.
+  bool isArch32Bit() const;
+
+  /// Test whether the architecture is 16-bit
+  ///
+  /// Note that this tests for 16-bit pointer width, and nothing else.
+  bool isArch16Bit() const;
+
+  /// isOSVersionLT - Helper function for doing comparisons against version
+  /// numbers included in the target triple.
+  bool isOSVersionLT(unsigned Major, unsigned Minor = 0,
+                     unsigned Micro = 0) const {
+    unsigned LHS[3];
+    getOSVersion(LHS[0], LHS[1], LHS[2]);
+
+    if (LHS[0] != Major)
+      return LHS[0] < Major;
+    if (LHS[1] != Minor)
+      return LHS[1] < Minor;
+    if (LHS[2] != Micro)
+      return LHS[2] < Micro;
+
+    return false;
+  }
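+
+  // Illustrative expectations for the helper above: with an OS component of
+  // "macosx10.12" (version 10.12.0), isOSVersionLT(10, 13) is true and
+  // isOSVersionLT(10, 11) is false.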
+
+  bool isOSVersionLT(const Triple &Other) const {
+    unsigned RHS[3];
+    Other.getOSVersion(RHS[0], RHS[1], RHS[2]);
+    return isOSVersionLT(RHS[0], RHS[1], RHS[2]);
+  }
+
+  /// isMacOSXVersionLT - Comparison function for checking OS X version
+  /// compatibility, which handles supporting skewed version numbering schemes
+  /// used by the "darwin" triples.
+  bool isMacOSXVersionLT(unsigned Major, unsigned Minor = 0,
+                         unsigned Micro = 0) const {
+    assert(isMacOSX() && "Not an OS X triple!");
+
+    // If this is OS X, expect a sane version number.
+    if (getOS() == Triple::MacOSX)
+      return isOSVersionLT(Major, Minor, Micro);
+
+    // Otherwise, compare to the "Darwin" number.
+    assert(Major == 10 && "Unexpected major version");
+    return isOSVersionLT(Minor + 4, Micro, 0);
+  }
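+
+  // Illustrative skew handling for the helper above: on a bare "darwin"
+  // triple, isMacOSXVersionLT(10, 12) compares against Darwin version 16,
+  // since darwinN corresponds to OS X 10.(N - 4).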
+
+  /// isMacOSX - Is this a Mac OS X triple. For legacy reasons, we support both
+  /// "darwin" and "osx" as OS X triples.
+  bool isMacOSX() const {
+    return getOS() == Triple::Darwin || getOS() == Triple::MacOSX;
+  }
+
+  /// Is this an iOS triple.
+  /// Note: This identifies tvOS as a variant of iOS. If that ever
+  /// changes, i.e., if the two operating systems diverge or their version
+  /// numbers get out of sync, that will need to be changed.
+  /// watchOS has completely different version numbers so it is not included.
+  bool isiOS() const {
+    return getOS() == Triple::IOS || isTvOS();
+  }
+
+  /// Is this an Apple tvOS triple.
+  bool isTvOS() const {
+    return getOS() == Triple::TvOS;
+  }
+
+  /// Is this an Apple watchOS triple.
+  bool isWatchOS() const {
+    return getOS() == Triple::WatchOS;
+  }
+
+  bool isWatchABI() const {
+    return getSubArch() == Triple::ARMSubArch_v7k;
+  }
+
+  /// isOSDarwin - Is this a "Darwin" OS (OS X, iOS, or watchOS).
+  bool isOSDarwin() const {
+    return isMacOSX() || isiOS() || isWatchOS();
+  }
+
+  bool isSimulatorEnvironment() const {
+    return getEnvironment() == Triple::Simulator;
+  }
+
+  bool isOSNetBSD() const {
+    return getOS() == Triple::NetBSD;
+  }
+
+  bool isOSOpenBSD() const {
+    return getOS() == Triple::OpenBSD;
+  }
+
+  bool isOSFreeBSD() const {
+    return getOS() == Triple::FreeBSD;
+  }
+
+  bool isOSFuchsia() const {
+    return getOS() == Triple::Fuchsia;
+  }
+
+  bool isOSDragonFly() const { return getOS() == Triple::DragonFly; }
+
+  bool isOSSolaris() const {
+    return getOS() == Triple::Solaris;
+  }
+
+  bool isOSIAMCU() const {
+    return getOS() == Triple::ELFIAMCU;
+  }
+
+  bool isOSUnknown() const { return getOS() == Triple::UnknownOS; }
+
+  bool isGNUEnvironment() const {
+    EnvironmentType Env = getEnvironment();
+    return Env == Triple::GNU || Env == Triple::GNUABIN32 ||
+           Env == Triple::GNUABI64 || Env == Triple::GNUEABI ||
+           Env == Triple::GNUEABIHF || Env == Triple::GNUX32;
+  }
+
+  bool isOSContiki() const {
+    return getOS() == Triple::Contiki;
+  }
+
+  /// Tests whether the OS is Haiku.
+  bool isOSHaiku() const {
+    return getOS() == Triple::Haiku;
+  }
+
+  /// Checks if the environment could be MSVC.
+  bool isWindowsMSVCEnvironment() const {
+    return getOS() == Triple::Win32 &&
+           (getEnvironment() == Triple::UnknownEnvironment ||
+            getEnvironment() == Triple::MSVC);
+  }
+
+  /// Checks if the environment is MSVC.
+  bool isKnownWindowsMSVCEnvironment() const {
+    return getOS() == Triple::Win32 && getEnvironment() == Triple::MSVC;
+  }
+
+  bool isWindowsCoreCLREnvironment() const {
+    return getOS() == Triple::Win32 && getEnvironment() == Triple::CoreCLR;
+  }
+
+  bool isWindowsItaniumEnvironment() const {
+    return getOS() == Triple::Win32 && getEnvironment() == Triple::Itanium;
+  }
+
+  bool isWindowsCygwinEnvironment() const {
+    return getOS() == Triple::Win32 && getEnvironment() == Triple::Cygnus;
+  }
+
+  bool isWindowsGNUEnvironment() const {
+    return getOS() == Triple::Win32 && getEnvironment() == Triple::GNU;
+  }
+
+  /// Tests for either Cygwin or MinGW OS
+  bool isOSCygMing() const {
+    return isWindowsCygwinEnvironment() || isWindowsGNUEnvironment();
+  }
+
+  /// Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
+  bool isOSMSVCRT() const {
+    return isWindowsMSVCEnvironment() || isWindowsGNUEnvironment() ||
+           isWindowsItaniumEnvironment();
+  }
+
+  /// Tests whether the OS is Windows.
+  bool isOSWindows() const {
+    return getOS() == Triple::Win32;
+  }
+
+  /// Tests whether the OS is NaCl (Native Client)
+  bool isOSNaCl() const {
+    return getOS() == Triple::NaCl;
+  }
+
+  /// Tests whether the OS is Linux.
+  bool isOSLinux() const {
+    return getOS() == Triple::Linux;
+  }
+
+  /// Tests whether the OS is kFreeBSD.
+  bool isOSKFreeBSD() const {
+    return getOS() == Triple::KFreeBSD;
+  }
+
+  /// Tests whether the OS uses glibc.
+  bool isOSGlibc() const {
+    return (getOS() == Triple::Linux || getOS() == Triple::KFreeBSD) &&
+           !isAndroid();
+  }
+
+  /// Tests whether the OS uses the ELF binary format.
+  bool isOSBinFormatELF() const {
+    return getObjectFormat() == Triple::ELF;
+  }
+
+  /// Tests whether the OS uses the COFF binary format.
+  bool isOSBinFormatCOFF() const {
+    return getObjectFormat() == Triple::COFF;
+  }
+
+  /// Tests whether the environment is MachO.
+  bool isOSBinFormatMachO() const {
+    return getObjectFormat() == Triple::MachO;
+  }
+
+  /// Tests whether the OS uses the Wasm binary format.
+  bool isOSBinFormatWasm() const {
+    return getObjectFormat() == Triple::Wasm;
+  }
+
+  /// Tests whether the target is the PS4 CPU
+  bool isPS4CPU() const {
+    return getArch() == Triple::x86_64 &&
+           getVendor() == Triple::SCEI &&
+           getOS() == Triple::PS4;
+  }
+
+  /// Tests whether the target is the PS4 platform
+  bool isPS4() const {
+    return getVendor() == Triple::SCEI &&
+           getOS() == Triple::PS4;
+  }
+
+  /// Tests whether the target is Android
+  bool isAndroid() const { return getEnvironment() == Triple::Android; }
+
+  bool isAndroidVersionLT(unsigned Major) const {
+    assert(isAndroid() && "Not an Android triple!");
+
+    unsigned Env[3];
+    getEnvironmentVersion(Env[0], Env[1], Env[2]);
+
+    // 64-bit targets did not exist before API level 21 (Lollipop).
+    if (isArch64Bit() && Env[0] < 21)
+      Env[0] = 21;
+
+    return Env[0] < Major;
+  }
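+
+  // Illustrative consequence of the clamp above: on "aarch64-linux-android"
+  // with no explicit API level, isAndroidVersionLT(21) is false, because a
+  // 64-bit Android target implies at least API level 21.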
+
+  /// Tests whether the environment is musl-libc
+  bool isMusl() const {
+    return getEnvironment() == Triple::Musl ||
+           getEnvironment() == Triple::MuslEABI ||
+           getEnvironment() == Triple::MuslEABIHF;
+  }
+
+  /// Tests whether the target is NVPTX (32- or 64-bit).
+  bool isNVPTX() const {
+    return getArch() == Triple::nvptx || getArch() == Triple::nvptx64;
+  }
+
+  /// Tests whether the target is Thumb (little and big endian).
+  bool isThumb() const {
+    return getArch() == Triple::thumb || getArch() == Triple::thumbeb;
+  }
+
+  /// Tests whether the target is ARM (little and big endian).
+  bool isARM() const {
+    return getArch() == Triple::arm || getArch() == Triple::armeb;
+  }
+
+  /// Tests whether the target is AArch64 (little and big endian).
+  bool isAArch64() const {
+    return getArch() == Triple::aarch64 || getArch() == Triple::aarch64_be;
+  }
+
+  /// Tests whether the target supports comdat
+  bool supportsCOMDAT() const {
+    return !isOSBinFormatMachO();
+  }
+
+  /// Tests whether the target uses emulated TLS as default.
+  bool hasDefaultEmulatedTLS() const {
+    return isAndroid() || isOSOpenBSD() || isWindowsCygwinEnvironment();
+  }
+
+  /// @}
+  /// @name Mutators
+  /// @{
+
+  /// setArch - Set the architecture (first) component of the triple
+  /// to a known type.
+  void setArch(ArchType Kind);
+
+  /// setVendor - Set the vendor (second) component of the triple to a
+  /// known type.
+  void setVendor(VendorType Kind);
+
+  /// setOS - Set the operating system (third) component of the triple
+  /// to a known type.
+  void setOS(OSType Kind);
+
+  /// setEnvironment - Set the environment (fourth) component of the triple
+  /// to a known type.
+  void setEnvironment(EnvironmentType Kind);
+
+  /// setObjectFormat - Set the object file format
+  void setObjectFormat(ObjectFormatType Kind);
+
+  /// setTriple - Set all components to the new triple \p Str.
+  void setTriple(const Twine &Str);
+
+  /// setArchName - Set the architecture (first) component of the
+  /// triple by name.
+  void setArchName(StringRef Str);
+
+  /// setVendorName - Set the vendor (second) component of the triple
+  /// by name.
+  void setVendorName(StringRef Str);
+
+  /// setOSName - Set the operating system (third) component of the
+  /// triple by name.
+  void setOSName(StringRef Str);
+
+  /// setEnvironmentName - Set the optional environment (fourth)
+  /// component of the triple by name.
+  void setEnvironmentName(StringRef Str);
+
+  /// setOSAndEnvironmentName - Set the operating system and optional
+  /// environment components with a single string.
+  void setOSAndEnvironmentName(StringRef Str);
+
+  /// @}
+  /// @name Helpers to build variants of a particular triple.
+  /// @{
+
+  /// Form a triple with a 32-bit variant of the current architecture.
+  ///
+  /// This can be used to move across "families" of architectures where useful.
+  ///
+  /// \returns A new triple with a 32-bit architecture or an unknown
+  ///          architecture if no such variant can be found.
+  llvm::Triple get32BitArchVariant() const;
+
+  /// Form a triple with a 64-bit variant of the current architecture.
+  ///
+  /// This can be used to move across "families" of architectures where useful.
+  ///
+  /// \returns A new triple with a 64-bit architecture or an unknown
+  ///          architecture if no such variant can be found.
+  llvm::Triple get64BitArchVariant() const;
+
+  /// Form a triple with a big endian variant of the current architecture.
+  ///
+  /// This can be used to move across "families" of architectures where useful.
+  ///
+  /// \returns A new triple with a big endian architecture or an unknown
+  ///          architecture if no such variant can be found.
+  llvm::Triple getBigEndianArchVariant() const;
+
+  /// Form a triple with a little endian variant of the current architecture.
+  ///
+  /// This can be used to move across "families" of architectures where useful.
+  ///
+  /// \returns A new triple with a little endian architecture or an unknown
+  ///          architecture if no such variant can be found.
+  llvm::Triple getLittleEndianArchVariant() const;
+
+  /// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting.
+  ///
+  /// \param Arch the architecture name (e.g., "armv7s"). If it is an empty
+  /// string then the triple's arch name is used.
+  StringRef getARMCPUForArch(StringRef Arch = StringRef()) const;
+
+  /// Tests whether the target triple is little endian.
+  ///
+  /// \returns true if the triple is little endian, false otherwise.
+  bool isLittleEndian() const;
+
+  /// Test whether target triples are compatible.
+  bool isCompatibleWith(const Triple &Other) const;
+
+  /// Merge target triples.
+  std::string merge(const Triple &Other) const;
+
+  /// @}
+  /// @name Static helpers for IDs.
+  /// @{
+
+  /// getArchTypeName - Get the canonical name for the \p Kind architecture.
+  static StringRef getArchTypeName(ArchType Kind);
+
+  /// getArchTypePrefix - Get the "prefix" canonical name for the \p Kind
+  /// architecture. This is the prefix used by the architecture specific
+  /// builtins, and is suitable for passing to \see
+  /// Intrinsic::getIntrinsicForGCCBuiltin().
+  ///
+  /// \return - The architecture prefix, or an empty StringRef if none is
+  /// defined.
+  static StringRef getArchTypePrefix(ArchType Kind);
+
+  /// getVendorTypeName - Get the canonical name for the \p Kind vendor.
+  static StringRef getVendorTypeName(VendorType Kind);
+
+  /// getOSTypeName - Get the canonical name for the \p Kind operating system.
+  static StringRef getOSTypeName(OSType Kind);
+
+  /// getEnvironmentTypeName - Get the canonical name for the \p Kind
+  /// environment.
+  static StringRef getEnvironmentTypeName(EnvironmentType Kind);
+
+  /// @}
+  /// @name Static helpers for converting alternate architecture names.
+  /// @{
+
+  /// getArchTypeForLLVMName - The canonical type for the given LLVM
+  /// architecture name (e.g., "x86").
+  static ArchType getArchTypeForLLVMName(StringRef Str);
+
+  /// @}
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_TRIPLE_H
diff --git a/linux-x64/clang/include/llvm/ADT/Twine.h b/linux-x64/clang/include/llvm/ADT/Twine.h
new file mode 100644
index 0000000..b60fd09
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/Twine.h
@@ -0,0 +1,542 @@
+//===- Twine.h - Fast Temporary String Concatenation ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_TWINE_H
+#define LLVM_ADT_TWINE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+
+  class formatv_object_base;
+  class raw_ostream;
+
+  /// Twine - A lightweight data structure for efficiently representing the
+  /// concatenation of temporary values as strings.
+  ///
+  /// A Twine is a kind of rope: it represents a concatenated string using a
+  /// binary tree, where the string is the preorder of the nodes. Since the
+  /// Twine can be efficiently rendered into a buffer when its result is used,
+  /// it avoids the cost of generating temporary values for intermediate string
+  /// results -- particularly in cases when the Twine result is never
+  /// required. By explicitly tracking the type of leaf nodes, we can also avoid
+  /// the creation of temporary strings for conversion operations (such as
+  /// appending an integer to a string).
+  ///
+  /// A Twine is not intended for direct use and should not be stored: its
+  /// implementation relies on the ability to store pointers to temporary stack
+  /// objects which may be deallocated at the end of a statement. Twines should
+  /// only be accepted as const reference arguments, when an API wishes to
+  /// accept possibly-concatenated strings.
+  ///
+  /// Twines support a special 'null' value, which always concatenates to form
+  /// itself, and renders as an empty string. This can be returned from APIs to
+  /// effectively nullify any concatenations performed on the result.
+  ///
+  /// \b Implementation
+  ///
+  /// Given the nature of a Twine, it is not possible for the Twine's
+  /// concatenation method to construct interior nodes; the result must be
+  /// represented inside the returned value. For this reason a Twine object
+  /// actually holds two values, the left- and right-hand sides of a
+  /// concatenation. We also have nullary Twine objects, which are effectively
+  /// sentinel values that represent empty strings.
+  ///
+  /// Thus, a Twine can effectively have zero, one, or two children. The \see
+  /// isNullary(), \see isUnary(), and \see isBinary() predicates exist for
+  /// testing the number of children.
+  ///
+  /// We maintain a number of invariants on Twine objects (FIXME: Why):
+  ///  - Nullary twines are always represented with their Kind on the left-hand
+  ///    side, and the Empty kind on the right-hand side.
+  ///  - Unary twines are always represented with the value on the left-hand
+  ///    side, and the Empty kind on the right-hand side.
+  ///  - If a Twine has another Twine as a child, that child should always be
+  ///    binary (otherwise it could have been folded into the parent).
+  ///
+  /// These invariants are checked by \see isValid().
+  ///
+  /// \b Efficiency Considerations
+  ///
+  /// The Twine is designed to yield efficient and small code for common
+  /// situations. For this reason, the concat() method is inlined so that
+  /// concatenations of leaf nodes can be optimized into stores directly into a
+  /// single stack allocated object.
+  ///
+  /// In practice, not all compilers can be trusted to optimize concat() fully,
+  /// so we provide two additional methods (and accompanying operator+
+  /// overloads) to guarantee that particularly important cases (cstring plus
+  /// StringRef) codegen as desired.
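+  ///
+  /// A minimal usage sketch (illustrative only; emitName is a hypothetical
+  /// consumer, not part of this header):
+  /// \code
+  ///   void emitName(const Twine &Name);
+  ///   StringRef Base = "block";
+  ///   emitName("prefix." + Base + Twine(7)); // no temporary std::string
+  /// \endcode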
+  class Twine {
+    /// NodeKind - Represent the type of an argument.
+    enum NodeKind : unsigned char {
+      /// An empty string; the result of concatenating anything with it is also
+      /// empty.
+      NullKind,
+
+      /// The empty string.
+      EmptyKind,
+
+      /// A pointer to a Twine instance.
+      TwineKind,
+
+      /// A pointer to a C string instance.
+      CStringKind,
+
+      /// A pointer to an std::string instance.
+      StdStringKind,
+
+      /// A pointer to a StringRef instance.
+      StringRefKind,
+
+      /// A pointer to a SmallString instance.
+      SmallStringKind,
+
+      /// A pointer to a formatv_object_base instance.
+      FormatvObjectKind,
+
+      /// A char value, to render as a character.
+      CharKind,
+
+      /// An unsigned int value, to render as an unsigned decimal integer.
+      DecUIKind,
+
+      /// An int value, to render as a signed decimal integer.
+      DecIKind,
+
+      /// A pointer to an unsigned long value, to render as an unsigned decimal
+      /// integer.
+      DecULKind,
+
+      /// A pointer to a long value, to render as a signed decimal integer.
+      DecLKind,
+
+      /// A pointer to an unsigned long long value, to render as an unsigned
+      /// decimal integer.
+      DecULLKind,
+
+      /// A pointer to a long long value, to render as a signed decimal integer.
+      DecLLKind,
+
+      /// A pointer to a uint64_t value, to render as an unsigned hexadecimal
+      /// integer.
+      UHexKind
+    };
+
+    union Child
+    {
+      const Twine *twine;
+      const char *cString;
+      const std::string *stdString;
+      const StringRef *stringRef;
+      const SmallVectorImpl<char> *smallString;
+      const formatv_object_base *formatvObject;
+      char character;
+      unsigned int decUI;
+      int decI;
+      const unsigned long *decUL;
+      const long *decL;
+      const unsigned long long *decULL;
+      const long long *decLL;
+      const uint64_t *uHex;
+    };
+
+    /// LHS - The prefix in the concatenation, which may be uninitialized for
+    /// Null or Empty kinds.
+    Child LHS;
+
+    /// RHS - The suffix in the concatenation, which may be uninitialized for
+    /// Null or Empty kinds.
+    Child RHS;
+
+    /// LHSKind - The NodeKind of the left hand side, \see getLHSKind().
+    NodeKind LHSKind = EmptyKind;
+
+    /// RHSKind - The NodeKind of the right hand side, \see getRHSKind().
+    NodeKind RHSKind = EmptyKind;
+
+    /// Construct a nullary twine; the kind must be NullKind or EmptyKind.
+    explicit Twine(NodeKind Kind) : LHSKind(Kind) {
+      assert(isNullary() && "Invalid kind!");
+    }
+
+    /// Construct a binary twine.
+    explicit Twine(const Twine &LHS, const Twine &RHS)
+        : LHSKind(TwineKind), RHSKind(TwineKind) {
+      this->LHS.twine = &LHS;
+      this->RHS.twine = &RHS;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct a twine from explicit values.
+    explicit Twine(Child LHS, NodeKind LHSKind, Child RHS, NodeKind RHSKind)
+        : LHS(LHS), RHS(RHS), LHSKind(LHSKind), RHSKind(RHSKind) {
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Check for the null twine.
+    bool isNull() const {
+      return getLHSKind() == NullKind;
+    }
+
+    /// Check for the empty twine.
+    bool isEmpty() const {
+      return getLHSKind() == EmptyKind;
+    }
+
+    /// Check if this is a nullary twine (null or empty).
+    bool isNullary() const {
+      return isNull() || isEmpty();
+    }
+
+    /// Check if this is a unary twine.
+    bool isUnary() const {
+      return getRHSKind() == EmptyKind && !isNullary();
+    }
+
+    /// Check if this is a binary twine.
+    bool isBinary() const {
+      return getLHSKind() != NullKind && getRHSKind() != EmptyKind;
+    }
+
+    /// Check if this is a valid twine (satisfying the invariants on
+    /// order and number of arguments).
+    bool isValid() const {
+      // Nullary twines always have Empty on the RHS.
+      if (isNullary() && getRHSKind() != EmptyKind)
+        return false;
+
+      // Null should never appear on the RHS.
+      if (getRHSKind() == NullKind)
+        return false;
+
+      // The RHS cannot be non-empty if the LHS is empty.
+      if (getRHSKind() != EmptyKind && getLHSKind() == EmptyKind)
+        return false;
+
+      // A twine child should always be binary.
+      if (getLHSKind() == TwineKind &&
+          !LHS.twine->isBinary())
+        return false;
+      if (getRHSKind() == TwineKind &&
+          !RHS.twine->isBinary())
+        return false;
+
+      return true;
+    }
+
+    /// Get the NodeKind of the left-hand side.
+    NodeKind getLHSKind() const { return LHSKind; }
+
+    /// Get the NodeKind of the right-hand side.
+    NodeKind getRHSKind() const { return RHSKind; }
+
+    /// Print one child from a twine.
+    void printOneChild(raw_ostream &OS, Child Ptr, NodeKind Kind) const;
+
+    /// Print the representation of one child from a twine.
+    void printOneChildRepr(raw_ostream &OS, Child Ptr,
+                           NodeKind Kind) const;
+
+  public:
+    /// @name Constructors
+    /// @{
+
+    /// Construct from an empty string.
+    /*implicit*/ Twine() {
+      assert(isValid() && "Invalid twine!");
+    }
+
+    Twine(const Twine &) = default;
+
+    /// Construct from a C string.
+    ///
+    /// We take care here to optimize "" into the empty twine -- this will be
+    /// optimized out for string constants. This allows Twine arguments to have
+    /// default "" values, without introducing unnecessary string constants.
+    /*implicit*/ Twine(const char *Str) {
+      if (Str[0] != '\0') {
+        LHS.cString = Str;
+        LHSKind = CStringKind;
+      } else
+        LHSKind = EmptyKind;
+
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct from an std::string.
+    /*implicit*/ Twine(const std::string &Str) : LHSKind(StdStringKind) {
+      LHS.stdString = &Str;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct from a StringRef.
+    /*implicit*/ Twine(const StringRef &Str) : LHSKind(StringRefKind) {
+      LHS.stringRef = &Str;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct from a SmallString.
+    /*implicit*/ Twine(const SmallVectorImpl<char> &Str)
+        : LHSKind(SmallStringKind) {
+      LHS.smallString = &Str;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct from a formatv_object_base.
+    /*implicit*/ Twine(const formatv_object_base &Fmt)
+        : LHSKind(FormatvObjectKind) {
+      LHS.formatvObject = &Fmt;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct from a char.
+    explicit Twine(char Val) : LHSKind(CharKind) {
+      LHS.character = Val;
+    }
+
+    /// Construct from a signed char.
+    explicit Twine(signed char Val) : LHSKind(CharKind) {
+      LHS.character = static_cast<char>(Val);
+    }
+
+    /// Construct from an unsigned char.
+    explicit Twine(unsigned char Val) : LHSKind(CharKind) {
+      LHS.character = static_cast<char>(Val);
+    }
+
+    /// Construct a twine to print \p Val as an unsigned decimal integer.
+    explicit Twine(unsigned Val) : LHSKind(DecUIKind) {
+      LHS.decUI = Val;
+    }
+
+    /// Construct a twine to print \p Val as a signed decimal integer.
+    explicit Twine(int Val) : LHSKind(DecIKind) {
+      LHS.decI = Val;
+    }
+
+    /// Construct a twine to print \p Val as an unsigned decimal integer.
+    explicit Twine(const unsigned long &Val) : LHSKind(DecULKind) {
+      LHS.decUL = &Val;
+    }
+
+    /// Construct a twine to print \p Val as a signed decimal integer.
+    explicit Twine(const long &Val) : LHSKind(DecLKind) {
+      LHS.decL = &Val;
+    }
+
+    /// Construct a twine to print \p Val as an unsigned decimal integer.
+    explicit Twine(const unsigned long long &Val) : LHSKind(DecULLKind) {
+      LHS.decULL = &Val;
+    }
+
+    /// Construct a twine to print \p Val as a signed decimal integer.
+    explicit Twine(const long long &Val) : LHSKind(DecLLKind) {
+      LHS.decLL = &Val;
+    }
+
+    // FIXME: Unfortunately, to make sure this is as efficient as possible we
+    // need extra binary constructors from particular types. We can't rely on
+    // the compiler to be smart enough to fold operator+()/concat() down to the
+    // right thing. Yet.
+
+    /// Construct as the concatenation of a C string and a StringRef.
+    /*implicit*/ Twine(const char *LHS, const StringRef &RHS)
+        : LHSKind(CStringKind), RHSKind(StringRefKind) {
+      this->LHS.cString = LHS;
+      this->RHS.stringRef = &RHS;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Construct as the concatenation of a StringRef and a C string.
+    /*implicit*/ Twine(const StringRef &LHS, const char *RHS)
+        : LHSKind(StringRefKind), RHSKind(CStringKind) {
+      this->LHS.stringRef = &LHS;
+      this->RHS.cString = RHS;
+      assert(isValid() && "Invalid twine!");
+    }
+
+    /// Since the intended use of twines is as temporary objects, assignment
+    /// when concatenating might cause undefined behavior or stack corruption.
+    Twine &operator=(const Twine &) = delete;
+
+    /// Create a 'null' string, which is an empty string that always
+    /// concatenates to form another empty string.
+    static Twine createNull() {
+      return Twine(NullKind);
+    }
+
+    /// @}
+    /// @name Numeric Conversions
+    /// @{
+
+    // Construct a twine to print \p Val as an unsigned hexadecimal integer.
+    static Twine utohexstr(const uint64_t &Val) {
+      Child LHS, RHS;
+      LHS.uHex = &Val;
+      RHS.twine = nullptr;
+      return Twine(LHS, UHexKind, RHS, EmptyKind);
+    }
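+
+    // Illustrative rendering for the helper above: for a uint64_t with value
+    // 255, the resulting twine prints as "ff" (lowercase, no "0x" prefix).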
+
+    /// @}
+    /// @name Predicate Operations
+    /// @{
+
+    /// Check if this twine is trivially empty; a false return value does not
+    /// necessarily mean the twine is empty.
+    bool isTriviallyEmpty() const {
+      return isNullary();
+    }
+
+    /// Return true if this twine can be dynamically accessed as a single
+    /// StringRef value with getSingleStringRef().
+    bool isSingleStringRef() const {
+      if (getRHSKind() != EmptyKind) return false;
+
+      switch (getLHSKind()) {
+      case EmptyKind:
+      case CStringKind:
+      case StdStringKind:
+      case StringRefKind:
+      case SmallStringKind:
+        return true;
+      default:
+        return false;
+      }
+    }
+
+    /// @}
+    /// @name String Operations
+    /// @{
+
+    Twine concat(const Twine &Suffix) const;
+
+    /// @}
+    /// @name Output & Conversion.
+    /// @{
+
+    /// Return the twine contents as a std::string.
+    std::string str() const;
+
+    /// Append the concatenated string into the given SmallString or SmallVector.
+    void toVector(SmallVectorImpl<char> &Out) const;
+
+    /// This returns the twine as a single StringRef.  This method is only valid
+    /// if isSingleStringRef() is true.
+    StringRef getSingleStringRef() const {
+      assert(isSingleStringRef() &&
+             "This cannot be had as a single stringref!");
+      switch (getLHSKind()) {
+      default: llvm_unreachable("Out of sync with isSingleStringRef");
+      case EmptyKind:      return StringRef();
+      case CStringKind:    return StringRef(LHS.cString);
+      case StdStringKind:  return StringRef(*LHS.stdString);
+      case StringRefKind:  return *LHS.stringRef;
+      case SmallStringKind:
+        return StringRef(LHS.smallString->data(), LHS.smallString->size());
+      }
+    }
+
+    /// This returns the twine as a single StringRef if it can be
+    /// represented as such. Otherwise the twine is written into the given
+    /// SmallVector and a StringRef to the SmallVector's data is returned.
+    StringRef toStringRef(SmallVectorImpl<char> &Out) const {
+      if (isSingleStringRef())
+        return getSingleStringRef();
+      toVector(Out);
+      return StringRef(Out.data(), Out.size());
+    }
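+
+    // The usual call pattern for toStringRef (illustrative):
+    //
+    //   SmallString<128> Storage;
+    //   StringRef S = T.toStringRef(Storage);
+    //
+    // Storage must stay alive for as long as S is used.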
+
+    /// This returns the twine as a single null terminated StringRef if it
+    /// can be represented as such. Otherwise the twine is written into the
+    /// given SmallVector and a StringRef to the SmallVector's data is returned.
+    ///
+    /// The returned StringRef's size does not include the null terminator.
+    StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;
+
+    /// Write the concatenated string represented by this twine to the
+    /// stream \p OS.
+    void print(raw_ostream &OS) const;
+
+    /// Dump the concatenated string represented by this twine to stderr.
+    void dump() const;
+
+    /// Write the representation of this twine to the stream \p OS.
+    void printRepr(raw_ostream &OS) const;
+
+    /// Dump the representation of this twine to stderr.
+    void dumpRepr() const;
+
+    /// @}
+  };
+
+  /// @name Twine Inline Implementations
+  /// @{
+
+  inline Twine Twine::concat(const Twine &Suffix) const {
+    // Concatenation with null is null.
+    if (isNull() || Suffix.isNull())
+      return Twine(NullKind);
+
+    // Concatenation with empty yields the other side.
+    if (isEmpty())
+      return Suffix;
+    if (Suffix.isEmpty())
+      return *this;
+
+    // Otherwise we need to create a new node, taking care to fold in unary
+    // twines.
+    Child NewLHS, NewRHS;
+    NewLHS.twine = this;
+    NewRHS.twine = &Suffix;
+    NodeKind NewLHSKind = TwineKind, NewRHSKind = TwineKind;
+    if (isUnary()) {
+      NewLHS = LHS;
+      NewLHSKind = getLHSKind();
+    }
+    if (Suffix.isUnary()) {
+      NewRHS = Suffix.LHS;
+      NewRHSKind = Suffix.getLHSKind();
+    }
+
+    return Twine(NewLHS, NewLHSKind, NewRHS, NewRHSKind);
+  }
+
+  inline Twine operator+(const Twine &LHS, const Twine &RHS) {
+    return LHS.concat(RHS);
+  }
+
+  /// Additional overload to guarantee simplified codegen; this is equivalent to
+  /// concat().
+  inline Twine operator+(const char *LHS, const StringRef &RHS) {
+    return Twine(LHS, RHS);
+  }
+
+  /// Additional overload to guarantee simplified codegen; this is equivalent to
+  /// concat().
+  inline Twine operator+(const StringRef &LHS, const char *RHS) {
+    return Twine(LHS, RHS);
+  }
+
+  inline raw_ostream &operator<<(raw_ostream &OS, const Twine &RHS) {
+    RHS.print(OS);
+    return OS;
+  }
+
+  /// @}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_TWINE_H
diff --git a/linux-x64/clang/include/llvm/ADT/UniqueVector.h b/linux-x64/clang/include/llvm/ADT/UniqueVector.h
new file mode 100644
index 0000000..b17fb23
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/UniqueVector.h
@@ -0,0 +1,102 @@
+//===- llvm/ADT/UniqueVector.h ----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_UNIQUEVECTOR_H
+#define LLVM_ADT_UNIQUEVECTOR_H
+
+#include <cassert>
+#include <cstddef>
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+/// UniqueVector - This class produces a sequential ID number (base 1) for each
+/// unique entry that is added.  T is the type of entries in the vector; it
+/// must provide an implementation of operator== and of operator<.
+/// Entries can be fetched using operator[] with the entry ID.
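+///
+/// A short sketch (illustrative):
+/// \code
+///   UniqueVector<std::string> Names;
+///   unsigned A = Names.insert("foo");   // 1
+///   unsigned B = Names.insert("bar");   // 2
+///   assert(Names.insert("foo") == A);   // duplicates keep their ID
+///   assert(Names[A] == "foo");          // operator[] is 1-based
+/// \endcode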
+template<class T> class UniqueVector {
+public:
+  using VectorType = typename std::vector<T>;
+  using iterator = typename VectorType::iterator;
+  using const_iterator = typename VectorType::const_iterator;
+
+private:
+  // Map - Used to handle the correspondence of entry to ID.
+  std::map<T, unsigned> Map;
+
+  // Vector - ID ordered vector of entries. Entries can be indexed by ID - 1.
+  VectorType Vector;
+
+public:
+  /// insert - Append entry to the vector if it doesn't already exist.  Returns
+  /// the entry's index + 1 to be used as a unique ID.
+  unsigned insert(const T &Entry) {
+    // Check if the entry is already in the map.
+    unsigned &Val = Map[Entry];
+
+    // See if entry exists, if so return prior ID.
+    if (Val) return Val;
+
+    // Compute ID for entry.
+    Val = static_cast<unsigned>(Vector.size()) + 1;
+
+    // Insert in vector.
+    Vector.push_back(Entry);
+    return Val;
+  }
+
+  /// idFor - return the ID for an existing entry.  Returns 0 if the entry is
+  /// not found.
+  unsigned idFor(const T &Entry) const {
+    // Search for entry in the map.
+    typename std::map<T, unsigned>::const_iterator MI = Map.find(Entry);
+
+    // See if entry exists, if so return ID.
+    if (MI != Map.end()) return MI->second;
+
+    // No luck.
+    return 0;
+  }
+
+  /// operator[] - Returns a reference to the entry with the specified ID.
+  const T &operator[](unsigned ID) const {
+    assert(ID-1 < size() && "ID is 0 or out of range!");
+    return Vector[ID - 1];
+  }
+
+  /// \brief Return an iterator to the start of the vector.
+  iterator begin() { return Vector.begin(); }
+
+  /// \brief Return an iterator to the start of the vector.
+  const_iterator begin() const { return Vector.begin(); }
+
+  /// \brief Return an iterator to the end of the vector.
+  iterator end() { return Vector.end(); }
+
+  /// \brief Return an iterator to the end of the vector.
+  const_iterator end() const { return Vector.end(); }
+
+  /// size - Returns the number of entries in the vector.
+  size_t size() const { return Vector.size(); }
+
+  /// empty - Returns true if the vector is empty.
+  bool empty() const { return Vector.empty(); }
+
+  /// reset - Clears all the entries.
+  void reset() {
+    Map.clear();
+    Vector.clear();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_UNIQUEVECTOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/VariadicFunction.h b/linux-x64/clang/include/llvm/ADT/VariadicFunction.h
new file mode 100644
index 0000000..403130c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/VariadicFunction.h
@@ -0,0 +1,331 @@
+//===--- VariadicFunction.h - Variadic Functions ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file implements compile-time type-safe variadic functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_VARIADICFUNCTION_H
+#define LLVM_ADT_VARIADICFUNCTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+
+// Define macros to aid in expanding a comma separated series with the index of
+// the series pasted onto the last token.
+#define LLVM_COMMA_JOIN1(x) x ## 0
+#define LLVM_COMMA_JOIN2(x) LLVM_COMMA_JOIN1(x), x ## 1
+#define LLVM_COMMA_JOIN3(x) LLVM_COMMA_JOIN2(x), x ## 2
+#define LLVM_COMMA_JOIN4(x) LLVM_COMMA_JOIN3(x), x ## 3
+#define LLVM_COMMA_JOIN5(x) LLVM_COMMA_JOIN4(x), x ## 4
+#define LLVM_COMMA_JOIN6(x) LLVM_COMMA_JOIN5(x), x ## 5
+#define LLVM_COMMA_JOIN7(x) LLVM_COMMA_JOIN6(x), x ## 6
+#define LLVM_COMMA_JOIN8(x) LLVM_COMMA_JOIN7(x), x ## 7
+#define LLVM_COMMA_JOIN9(x) LLVM_COMMA_JOIN8(x), x ## 8
+#define LLVM_COMMA_JOIN10(x) LLVM_COMMA_JOIN9(x), x ## 9
+#define LLVM_COMMA_JOIN11(x) LLVM_COMMA_JOIN10(x), x ## 10
+#define LLVM_COMMA_JOIN12(x) LLVM_COMMA_JOIN11(x), x ## 11
+#define LLVM_COMMA_JOIN13(x) LLVM_COMMA_JOIN12(x), x ## 12
+#define LLVM_COMMA_JOIN14(x) LLVM_COMMA_JOIN13(x), x ## 13
+#define LLVM_COMMA_JOIN15(x) LLVM_COMMA_JOIN14(x), x ## 14
+#define LLVM_COMMA_JOIN16(x) LLVM_COMMA_JOIN15(x), x ## 15
+#define LLVM_COMMA_JOIN17(x) LLVM_COMMA_JOIN16(x), x ## 16
+#define LLVM_COMMA_JOIN18(x) LLVM_COMMA_JOIN17(x), x ## 17
+#define LLVM_COMMA_JOIN19(x) LLVM_COMMA_JOIN18(x), x ## 18
+#define LLVM_COMMA_JOIN20(x) LLVM_COMMA_JOIN19(x), x ## 19
+#define LLVM_COMMA_JOIN21(x) LLVM_COMMA_JOIN20(x), x ## 20
+#define LLVM_COMMA_JOIN22(x) LLVM_COMMA_JOIN21(x), x ## 21
+#define LLVM_COMMA_JOIN23(x) LLVM_COMMA_JOIN22(x), x ## 22
+#define LLVM_COMMA_JOIN24(x) LLVM_COMMA_JOIN23(x), x ## 23
+#define LLVM_COMMA_JOIN25(x) LLVM_COMMA_JOIN24(x), x ## 24
+#define LLVM_COMMA_JOIN26(x) LLVM_COMMA_JOIN25(x), x ## 25
+#define LLVM_COMMA_JOIN27(x) LLVM_COMMA_JOIN26(x), x ## 26
+#define LLVM_COMMA_JOIN28(x) LLVM_COMMA_JOIN27(x), x ## 27
+#define LLVM_COMMA_JOIN29(x) LLVM_COMMA_JOIN28(x), x ## 28
+#define LLVM_COMMA_JOIN30(x) LLVM_COMMA_JOIN29(x), x ## 29
+#define LLVM_COMMA_JOIN31(x) LLVM_COMMA_JOIN30(x), x ## 30
+#define LLVM_COMMA_JOIN32(x) LLVM_COMMA_JOIN31(x), x ## 31
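+
+// For example (illustrative), LLVM_COMMA_JOIN3(const ArgT &A) expands to:
+//
+//   const ArgT &A0, const ArgT &A1, const ArgT &A2
+//
+// which is how LLVM_DEFINE_OVERLOAD below builds each overload's parameter
+// list and the matching &A0, &A1, ... argument array.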
+
+/// \brief Class which can simulate a type-safe variadic function.
+///
+/// The VariadicFunction class template makes it easy to define
+/// type-safe variadic functions where all arguments have the same
+/// type.
+///
+/// Suppose we need a variadic function like this:
+///
+///   ResultT Foo(const ArgT &A_0, const ArgT &A_1, ..., const ArgT &A_N);
+///
+/// Instead of many overloads of Foo(), we only need to define a helper
+/// function that takes an array of arguments:
+///
+///   ResultT FooImpl(ArrayRef<const ArgT *> Args) {
+///     // 'Args[i]' is a pointer to the i-th argument passed to Foo().
+///     ...
+///   }
+///
+/// and then define Foo() like this:
+///
+///   const VariadicFunction<ResultT, ArgT, FooImpl> Foo;
+///
+/// VariadicFunction takes care of defining the overloads of Foo().
+///
+/// Actually, Foo is a function object (i.e. functor) instead of a plain
+/// function.  This object is stateless and its constructor/destructor
+/// does nothing, so it's safe to create global objects and call Foo(...) at
+/// any time.
+///
+/// Sometimes we need a variadic function to have some fixed leading
+/// arguments whose types may be different from that of the optional
+/// arguments.  For example:
+///
+///   bool FullMatch(const StringRef &S, const RE &Regex,
+///                  const ArgT &A_0, ..., const ArgT &A_N);
+///
+/// VariadicFunctionN is for such cases, where N is the number of fixed
+/// arguments.  It is like VariadicFunction, except that it takes N more
+/// template arguments for the types of the fixed arguments:
+///
+///   bool FullMatchImpl(const StringRef &S, const RE &Regex,
+///                      ArrayRef<const ArgT *> Args) { ... }
+///   const VariadicFunction2<bool, const StringRef&,
+///                           const RE&, ArgT, FullMatchImpl>
+///       FullMatch;
+///
+/// Currently VariadicFunction and friends support up to 3
+/// fixed leading arguments and up to 32 optional arguments.
+template <typename ResultT, typename ArgT,
+          ResultT (*Func)(ArrayRef<const ArgT *>)>
+struct VariadicFunction {
+  ResultT operator()() const {
+    return Func(None);
+  }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+  ResultT operator()(LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+    const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+    return Func(makeArrayRef(Args)); \
+  }
+  LLVM_DEFINE_OVERLOAD(1)
+  LLVM_DEFINE_OVERLOAD(2)
+  LLVM_DEFINE_OVERLOAD(3)
+  LLVM_DEFINE_OVERLOAD(4)
+  LLVM_DEFINE_OVERLOAD(5)
+  LLVM_DEFINE_OVERLOAD(6)
+  LLVM_DEFINE_OVERLOAD(7)
+  LLVM_DEFINE_OVERLOAD(8)
+  LLVM_DEFINE_OVERLOAD(9)
+  LLVM_DEFINE_OVERLOAD(10)
+  LLVM_DEFINE_OVERLOAD(11)
+  LLVM_DEFINE_OVERLOAD(12)
+  LLVM_DEFINE_OVERLOAD(13)
+  LLVM_DEFINE_OVERLOAD(14)
+  LLVM_DEFINE_OVERLOAD(15)
+  LLVM_DEFINE_OVERLOAD(16)
+  LLVM_DEFINE_OVERLOAD(17)
+  LLVM_DEFINE_OVERLOAD(18)
+  LLVM_DEFINE_OVERLOAD(19)
+  LLVM_DEFINE_OVERLOAD(20)
+  LLVM_DEFINE_OVERLOAD(21)
+  LLVM_DEFINE_OVERLOAD(22)
+  LLVM_DEFINE_OVERLOAD(23)
+  LLVM_DEFINE_OVERLOAD(24)
+  LLVM_DEFINE_OVERLOAD(25)
+  LLVM_DEFINE_OVERLOAD(26)
+  LLVM_DEFINE_OVERLOAD(27)
+  LLVM_DEFINE_OVERLOAD(28)
+  LLVM_DEFINE_OVERLOAD(29)
+  LLVM_DEFINE_OVERLOAD(30)
+  LLVM_DEFINE_OVERLOAD(31)
+  LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
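+
+// A concrete instantiation of the pattern documented above (StrCatImpl and
+// StrCat are hypothetical names, shown only for illustration):
+//
+//   std::string StrCatImpl(ArrayRef<const std::string *> Args) {
+//     std::string Result;
+//     for (const std::string *S : Args)
+//       Result += *S;
+//     return Result;
+//   }
+//   const VariadicFunction<std::string, std::string, StrCatImpl> StrCat;
+//   std::string S = StrCat("a", "b", "c"); // "abc"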
+
+template <typename ResultT, typename Param0T, typename ArgT,
+          ResultT (*Func)(Param0T, ArrayRef<const ArgT *>)>
+struct VariadicFunction1 {
+  ResultT operator()(Param0T P0) const {
+    return Func(P0, None);
+  }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+  ResultT operator()(Param0T P0, LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+    const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+    return Func(P0, makeArrayRef(Args)); \
+  }
+  LLVM_DEFINE_OVERLOAD(1)
+  LLVM_DEFINE_OVERLOAD(2)
+  LLVM_DEFINE_OVERLOAD(3)
+  LLVM_DEFINE_OVERLOAD(4)
+  LLVM_DEFINE_OVERLOAD(5)
+  LLVM_DEFINE_OVERLOAD(6)
+  LLVM_DEFINE_OVERLOAD(7)
+  LLVM_DEFINE_OVERLOAD(8)
+  LLVM_DEFINE_OVERLOAD(9)
+  LLVM_DEFINE_OVERLOAD(10)
+  LLVM_DEFINE_OVERLOAD(11)
+  LLVM_DEFINE_OVERLOAD(12)
+  LLVM_DEFINE_OVERLOAD(13)
+  LLVM_DEFINE_OVERLOAD(14)
+  LLVM_DEFINE_OVERLOAD(15)
+  LLVM_DEFINE_OVERLOAD(16)
+  LLVM_DEFINE_OVERLOAD(17)
+  LLVM_DEFINE_OVERLOAD(18)
+  LLVM_DEFINE_OVERLOAD(19)
+  LLVM_DEFINE_OVERLOAD(20)
+  LLVM_DEFINE_OVERLOAD(21)
+  LLVM_DEFINE_OVERLOAD(22)
+  LLVM_DEFINE_OVERLOAD(23)
+  LLVM_DEFINE_OVERLOAD(24)
+  LLVM_DEFINE_OVERLOAD(25)
+  LLVM_DEFINE_OVERLOAD(26)
+  LLVM_DEFINE_OVERLOAD(27)
+  LLVM_DEFINE_OVERLOAD(28)
+  LLVM_DEFINE_OVERLOAD(29)
+  LLVM_DEFINE_OVERLOAD(30)
+  LLVM_DEFINE_OVERLOAD(31)
+  LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+template <typename ResultT, typename Param0T, typename Param1T, typename ArgT,
+          ResultT (*Func)(Param0T, Param1T, ArrayRef<const ArgT *>)>
+struct VariadicFunction2 {
+  ResultT operator()(Param0T P0, Param1T P1) const {
+    return Func(P0, P1, None);
+  }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+  ResultT operator()(Param0T P0, Param1T P1, \
+                     LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+    const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+    return Func(P0, P1, makeArrayRef(Args)); \
+  }
+  LLVM_DEFINE_OVERLOAD(1)
+  LLVM_DEFINE_OVERLOAD(2)
+  LLVM_DEFINE_OVERLOAD(3)
+  LLVM_DEFINE_OVERLOAD(4)
+  LLVM_DEFINE_OVERLOAD(5)
+  LLVM_DEFINE_OVERLOAD(6)
+  LLVM_DEFINE_OVERLOAD(7)
+  LLVM_DEFINE_OVERLOAD(8)
+  LLVM_DEFINE_OVERLOAD(9)
+  LLVM_DEFINE_OVERLOAD(10)
+  LLVM_DEFINE_OVERLOAD(11)
+  LLVM_DEFINE_OVERLOAD(12)
+  LLVM_DEFINE_OVERLOAD(13)
+  LLVM_DEFINE_OVERLOAD(14)
+  LLVM_DEFINE_OVERLOAD(15)
+  LLVM_DEFINE_OVERLOAD(16)
+  LLVM_DEFINE_OVERLOAD(17)
+  LLVM_DEFINE_OVERLOAD(18)
+  LLVM_DEFINE_OVERLOAD(19)
+  LLVM_DEFINE_OVERLOAD(20)
+  LLVM_DEFINE_OVERLOAD(21)
+  LLVM_DEFINE_OVERLOAD(22)
+  LLVM_DEFINE_OVERLOAD(23)
+  LLVM_DEFINE_OVERLOAD(24)
+  LLVM_DEFINE_OVERLOAD(25)
+  LLVM_DEFINE_OVERLOAD(26)
+  LLVM_DEFINE_OVERLOAD(27)
+  LLVM_DEFINE_OVERLOAD(28)
+  LLVM_DEFINE_OVERLOAD(29)
+  LLVM_DEFINE_OVERLOAD(30)
+  LLVM_DEFINE_OVERLOAD(31)
+  LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+template <typename ResultT, typename Param0T, typename Param1T,
+          typename Param2T, typename ArgT,
+          ResultT (*Func)(Param0T, Param1T, Param2T, ArrayRef<const ArgT *>)>
+struct VariadicFunction3 {
+  ResultT operator()(Param0T P0, Param1T P1, Param2T P2) const {
+    return Func(P0, P1, P2, None);
+  }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+  ResultT operator()(Param0T P0, Param1T P1, Param2T P2, \
+                     LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+    const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+    return Func(P0, P1, P2, makeArrayRef(Args)); \
+  }
+  LLVM_DEFINE_OVERLOAD(1)
+  LLVM_DEFINE_OVERLOAD(2)
+  LLVM_DEFINE_OVERLOAD(3)
+  LLVM_DEFINE_OVERLOAD(4)
+  LLVM_DEFINE_OVERLOAD(5)
+  LLVM_DEFINE_OVERLOAD(6)
+  LLVM_DEFINE_OVERLOAD(7)
+  LLVM_DEFINE_OVERLOAD(8)
+  LLVM_DEFINE_OVERLOAD(9)
+  LLVM_DEFINE_OVERLOAD(10)
+  LLVM_DEFINE_OVERLOAD(11)
+  LLVM_DEFINE_OVERLOAD(12)
+  LLVM_DEFINE_OVERLOAD(13)
+  LLVM_DEFINE_OVERLOAD(14)
+  LLVM_DEFINE_OVERLOAD(15)
+  LLVM_DEFINE_OVERLOAD(16)
+  LLVM_DEFINE_OVERLOAD(17)
+  LLVM_DEFINE_OVERLOAD(18)
+  LLVM_DEFINE_OVERLOAD(19)
+  LLVM_DEFINE_OVERLOAD(20)
+  LLVM_DEFINE_OVERLOAD(21)
+  LLVM_DEFINE_OVERLOAD(22)
+  LLVM_DEFINE_OVERLOAD(23)
+  LLVM_DEFINE_OVERLOAD(24)
+  LLVM_DEFINE_OVERLOAD(25)
+  LLVM_DEFINE_OVERLOAD(26)
+  LLVM_DEFINE_OVERLOAD(27)
+  LLVM_DEFINE_OVERLOAD(28)
+  LLVM_DEFINE_OVERLOAD(29)
+  LLVM_DEFINE_OVERLOAD(30)
+  LLVM_DEFINE_OVERLOAD(31)
+  LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+// Cleanup the macro namespace.
+#undef LLVM_COMMA_JOIN1
+#undef LLVM_COMMA_JOIN2
+#undef LLVM_COMMA_JOIN3
+#undef LLVM_COMMA_JOIN4
+#undef LLVM_COMMA_JOIN5
+#undef LLVM_COMMA_JOIN6
+#undef LLVM_COMMA_JOIN7
+#undef LLVM_COMMA_JOIN8
+#undef LLVM_COMMA_JOIN9
+#undef LLVM_COMMA_JOIN10
+#undef LLVM_COMMA_JOIN11
+#undef LLVM_COMMA_JOIN12
+#undef LLVM_COMMA_JOIN13
+#undef LLVM_COMMA_JOIN14
+#undef LLVM_COMMA_JOIN15
+#undef LLVM_COMMA_JOIN16
+#undef LLVM_COMMA_JOIN17
+#undef LLVM_COMMA_JOIN18
+#undef LLVM_COMMA_JOIN19
+#undef LLVM_COMMA_JOIN20
+#undef LLVM_COMMA_JOIN21
+#undef LLVM_COMMA_JOIN22
+#undef LLVM_COMMA_JOIN23
+#undef LLVM_COMMA_JOIN24
+#undef LLVM_COMMA_JOIN25
+#undef LLVM_COMMA_JOIN26
+#undef LLVM_COMMA_JOIN27
+#undef LLVM_COMMA_JOIN28
+#undef LLVM_COMMA_JOIN29
+#undef LLVM_COMMA_JOIN30
+#undef LLVM_COMMA_JOIN31
+#undef LLVM_COMMA_JOIN32
+
+} // end namespace llvm
+
+#endif  // LLVM_ADT_VARIADICFUNCTION_H
diff --git a/linux-x64/clang/include/llvm/ADT/edit_distance.h b/linux-x64/clang/include/llvm/ADT/edit_distance.h
new file mode 100644
index 0000000..06a01b1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/edit_distance.h
@@ -0,0 +1,103 @@
+//===-- llvm/ADT/edit_distance.h - Array edit distance function -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a Levenshtein distance function that works for any two
+// sequences, with each element of each sequence being analogous to a character
+// in a string.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EDIT_DISTANCE_H
+#define LLVM_ADT_EDIT_DISTANCE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include <algorithm>
+#include <memory>
+
+namespace llvm {
+
+/// \brief Determine the edit distance between two sequences.
+///
+/// \param FromArray the first sequence to compare.
+///
+/// \param ToArray the second sequence to compare.
+///
+/// \param AllowReplacements whether to allow element replacements (change one
+/// element into another) as a single operation, rather than as two operations
+/// (an insertion and a removal).
+///
+/// \param MaxEditDistance If non-zero, the maximum edit distance that this
+/// routine is allowed to compute. If the edit distance will exceed that
+/// maximum, returns \c MaxEditDistance+1.
+///
+/// \returns the minimum number of element insertions, removals, or (if
+/// \p AllowReplacements is \c true) replacements needed to transform one of
+/// the given sequences into the other. If zero, the sequences are identical.
+template<typename T>
+unsigned ComputeEditDistance(ArrayRef<T> FromArray, ArrayRef<T> ToArray,
+                             bool AllowReplacements = true,
+                             unsigned MaxEditDistance = 0) {
+  // The algorithm implemented below is the "classic"
+  // dynamic-programming algorithm for computing the Levenshtein
+  // distance, which is described here:
+  //
+  //   http://en.wikipedia.org/wiki/Levenshtein_distance
+  //
+  // Although the algorithm is typically described using an m x n
+  // array, only one row plus one element are used at a time, so this
+  // implementation just keeps one vector for the row.  To update one entry,
+  // only the entries to the left, top, and top-left are needed.  The left
+  // entry is in Row[x-1], the top entry is what's in Row[x] from the last
+  // iteration, and the top-left entry is stored in Previous.
+  typename ArrayRef<T>::size_type m = FromArray.size();
+  typename ArrayRef<T>::size_type n = ToArray.size();
+
+  const unsigned SmallBufferSize = 64;
+  unsigned SmallBuffer[SmallBufferSize];
+  std::unique_ptr<unsigned[]> Allocated;
+  unsigned *Row = SmallBuffer;
+  if (n + 1 > SmallBufferSize) {
+    Row = new unsigned[n + 1];
+    Allocated.reset(Row);
+  }
+
+  for (unsigned i = 1; i <= n; ++i)
+    Row[i] = i;
+
+  for (typename ArrayRef<T>::size_type y = 1; y <= m; ++y) {
+    Row[0] = y;
+    unsigned BestThisRow = Row[0];
+
+    unsigned Previous = y - 1;
+    for (typename ArrayRef<T>::size_type x = 1; x <= n; ++x) {
+      unsigned OldRow = Row[x];
+      if (AllowReplacements) {
+        Row[x] = std::min(
+            Previous + (FromArray[y-1] == ToArray[x-1] ? 0u : 1u),
+            std::min(Row[x-1], Row[x])+1);
+      } else {
+        if (FromArray[y-1] == ToArray[x-1]) Row[x] = Previous;
+        else Row[x] = std::min(Row[x-1], Row[x]) + 1;
+      }
+      Previous = OldRow;
+      BestThisRow = std::min(BestThisRow, Row[x]);
+    }
+
+    if (MaxEditDistance && BestThisRow > MaxEditDistance)
+      return MaxEditDistance + 1;
+  }
+
+  unsigned Result = Row[n];
+  return Result;
+}
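+
+// A usage sketch (values shown are what the algorithm computes): "kitten" ->
+// "sitting" needs 3 edits with replacements allowed, 5 with insertions and
+// removals only.
+//
+// \code
+//   StringRef From = "kitten", To = "sitting";
+//   unsigned D = ComputeEditDistance(makeArrayRef(From.data(), From.size()),
+//                                    makeArrayRef(To.data(), To.size()));
+//   // D == 3: k->s, e->i, insert 'g'.
+//   unsigned D2 = ComputeEditDistance(makeArrayRef(From.data(), From.size()),
+//                                     makeArrayRef(To.data(), To.size()),
+//                                     /*AllowReplacements=*/false); // D2 == 5
+// \endcode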
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ADT/ilist.h b/linux-x64/clang/include/llvm/ADT/ilist.h
new file mode 100644
index 0000000..a788f81
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ilist.h
@@ -0,0 +1,434 @@
+//==-- llvm/ADT/ilist.h - Intrusive Linked List Template ---------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes that implement an intrusive doubly linked list
+// (i.e. each node of the list must contain next and previous fields for the
+// list).
+//
+// The ilist class itself is intended as a drop-in replacement for std::list.
+// This list replacement does not provide a constant-time size() method, so be
+// careful to use empty() when you really want to know whether the list is
+// empty.
+//
+// The ilist class is implemented as a circular list.  The list itself contains
+// a sentinel node, whose Next points at begin() and whose Prev points at
+// rbegin().  The sentinel node itself serves as end() and rend().
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_H
+#define LLVM_ADT_ILIST_H
+
+#include "llvm/ADT/simple_ilist.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+
+namespace llvm {
+
+/// Use delete by default for iplist and ilist.
+///
+/// Specialize this to get different behaviour for ownership-related API.  (If
+/// you really want ownership semantics, consider using std::list or building
+/// something like \a BumpPtrList.)
+///
+/// \see ilist_noalloc_traits
+template <typename NodeTy> struct ilist_alloc_traits {
+  static void deleteNode(NodeTy *V) { delete V; }
+};
+
+/// Custom traits to do nothing on deletion.
+///
+/// Specialize ilist_alloc_traits to inherit from this to disable the
+/// non-intrusive deletion in iplist (which implies ownership).
+///
+/// If you want purely intrusive semantics with no callbacks, consider using \a
+/// simple_ilist instead.
+///
+/// \code
+/// template <>
+/// struct ilist_alloc_traits<MyType> : ilist_noalloc_traits<MyType> {};
+/// \endcode
+template <typename NodeTy> struct ilist_noalloc_traits {
+  static void deleteNode(NodeTy *V) {}
+};
+
+/// Callbacks do nothing by default in iplist and ilist.
+///
+/// Specialize this to use callbacks when nodes change their list membership.
+template <typename NodeTy> struct ilist_callback_traits {
+  void addNodeToList(NodeTy *) {}
+  void removeNodeFromList(NodeTy *) {}
+
+  /// Callback before transferring nodes to this list.
+  ///
+  /// \pre \c this!=&OldList
+  template <class Iterator>
+  void transferNodesFromList(ilist_callback_traits &OldList, Iterator /*first*/,
+                             Iterator /*last*/) {
+    (void)OldList;
+  }
+};
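+
+// A specialization sketch (MyNode is a hypothetical node type) to observe
+// membership changes:
+//
+// \code
+//   template <> struct ilist_callback_traits<MyNode> {
+//     void addNodeToList(MyNode *N) { /* e.g. record N's new owner */ }
+//     void removeNodeFromList(MyNode *N) { /* e.g. clear N's owner */ }
+//     template <class Iterator>
+//     void transferNodesFromList(ilist_callback_traits &OldList,
+//                                Iterator First, Iterator Last) {
+//       // Called on the destination list before a splice from OldList.
+//     }
+//   };
+// \endcode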
+
+/// A fragment of template traits for intrusive lists that provides default
+/// node-related operations.
+///
+/// TODO: Remove this layer of indirection.  It's not necessary.
+template <typename NodeTy>
+struct ilist_node_traits : ilist_alloc_traits<NodeTy>,
+                           ilist_callback_traits<NodeTy> {};
+
+/// Default template traits for intrusive lists.
+///
+/// By inheriting from this, you can easily use default implementations for all
+/// common operations.
+///
+/// TODO: Remove this customization point.  Specializing ilist_traits is
+/// already fully general.
+template <typename NodeTy>
+struct ilist_default_traits : public ilist_node_traits<NodeTy> {};
+
+/// Template traits for intrusive lists.
+///
+/// Customize callbacks and allocation semantics.
+template <typename NodeTy>
+struct ilist_traits : public ilist_default_traits<NodeTy> {};
+
+/// Const traits should never be instantiated.
+template <typename Ty> struct ilist_traits<const Ty> {};
+
+namespace ilist_detail {
+
+template <class T> T &make();
+
+/// Type trait to check for a traits class that has a getNext member (as a
+/// canary for any of the ilist_nextprev_traits API).
+template <class TraitsT, class NodeT> struct HasGetNext {
+  typedef char Yes[1];
+  typedef char No[2];
+  template <size_t N> struct SFINAE {};
+
+  template <class U>
+  static Yes &test(U *I, decltype(I->getNext(&make<NodeT>())) * = 0);
+  template <class> static No &test(...);
+
+public:
+  static const bool value = sizeof(test<TraitsT>(nullptr)) == sizeof(Yes);
+};
+
+/// Type trait to check for a traits class that has a createSentinel member (as
+/// a canary for any of the ilist_sentinel_traits API).
+template <class TraitsT> struct HasCreateSentinel {
+  typedef char Yes[1];
+  typedef char No[2];
+
+  template <class U>
+  static Yes &test(U *I, decltype(I->createSentinel()) * = 0);
+  template <class> static No &test(...);
+
+public:
+  static const bool value = sizeof(test<TraitsT>(nullptr)) == sizeof(Yes);
+};
+
+/// Type trait to check for a traits class that has a createNode member.
+/// Allocation should be managed in a wrapper class, instead of in
+/// ilist_traits.
+template <class TraitsT, class NodeT> struct HasCreateNode {
+  typedef char Yes[1];
+  typedef char No[2];
+  template <size_t N> struct SFINAE {};
+
+  template <class U>
+  static Yes &test(U *I, decltype(I->createNode(make<NodeT>())) * = 0);
+  template <class> static No &test(...);
+
+public:
+  static const bool value = sizeof(test<TraitsT>(nullptr)) == sizeof(Yes);
+};
+
+template <class TraitsT, class NodeT> struct HasObsoleteCustomization {
+  static const bool value = HasGetNext<TraitsT, NodeT>::value ||
+                            HasCreateSentinel<TraitsT>::value ||
+                            HasCreateNode<TraitsT, NodeT>::value;
+};
+
+} // end namespace ilist_detail
+
+//===----------------------------------------------------------------------===//
+//
+/// A wrapper around an intrusive list with callbacks and non-intrusive
+/// ownership.
+///
+/// This wraps a purely intrusive list (like simple_ilist) with a configurable
+/// traits class.  The traits can implement callbacks and customize the
+/// ownership semantics.
+///
+/// This is a subset of ilist functionality that can safely be used on nodes of
+/// polymorphic types, i.e. a heterogeneous list with a common base class that
+/// holds the next/prev pointers.  The only state of the list itself is an
+/// ilist_sentinel, which holds pointers to the first and last nodes in the
+/// list.
+template <class IntrusiveListT, class TraitsT>
+class iplist_impl : public TraitsT, IntrusiveListT {
+  typedef IntrusiveListT base_list_type;
+
+protected:
+  typedef iplist_impl iplist_impl_type;
+
+public:
+  typedef typename base_list_type::pointer pointer;
+  typedef typename base_list_type::const_pointer const_pointer;
+  typedef typename base_list_type::reference reference;
+  typedef typename base_list_type::const_reference const_reference;
+  typedef typename base_list_type::value_type value_type;
+  typedef typename base_list_type::size_type size_type;
+  typedef typename base_list_type::difference_type difference_type;
+  typedef typename base_list_type::iterator iterator;
+  typedef typename base_list_type::const_iterator const_iterator;
+  typedef typename base_list_type::reverse_iterator reverse_iterator;
+  typedef
+      typename base_list_type::const_reverse_iterator const_reverse_iterator;
+
+private:
+  // TODO: Drop this assertion and the transitive type traits anytime after
+  // v4.0 is branched (i.e., keep them for one release to help out-of-tree code
+  // update).
+  static_assert(
+      !ilist_detail::HasObsoleteCustomization<TraitsT, value_type>::value,
+      "ilist customization points have changed!");
+
+  static bool op_less(const_reference L, const_reference R) { return L < R; }
+  static bool op_equal(const_reference L, const_reference R) { return L == R; }
+
+public:
+  iplist_impl() = default;
+
+  iplist_impl(const iplist_impl &) = delete;
+  iplist_impl &operator=(const iplist_impl &) = delete;
+
+  iplist_impl(iplist_impl &&X)
+      : TraitsT(std::move(X)), IntrusiveListT(std::move(X)) {}
+  iplist_impl &operator=(iplist_impl &&X) {
+    *static_cast<TraitsT *>(this) = std::move(X);
+    *static_cast<IntrusiveListT *>(this) = std::move(X);
+    return *this;
+  }
+
+  ~iplist_impl() { clear(); }
+
+  // Miscellaneous inspection routines.
+  size_type max_size() const { return size_type(-1); }
+
+  using base_list_type::begin;
+  using base_list_type::end;
+  using base_list_type::rbegin;
+  using base_list_type::rend;
+  using base_list_type::empty;
+  using base_list_type::front;
+  using base_list_type::back;
+
+  void swap(iplist_impl &RHS) {
+    assert(0 && "Swap does not use list traits callback correctly yet!");
+    base_list_type::swap(RHS);
+  }
+
+  iterator insert(iterator where, pointer New) {
+    this->addNodeToList(New); // Notify traits that we added a node...
+    return base_list_type::insert(where, *New);
+  }
+
+  iterator insert(iterator where, const_reference New) {
+    return this->insert(where, new value_type(New));
+  }
+
+  iterator insertAfter(iterator where, pointer New) {
+    if (empty())
+      return insert(begin(), New);
+    else
+      return insert(++where, New);
+  }
+
+  /// Clone another list.
+  template <class Cloner> void cloneFrom(const iplist_impl &L2, Cloner clone) {
+    clear();
+    for (const_reference V : L2)
+      push_back(clone(V));
+  }
+
+  pointer remove(iterator &IT) {
+    pointer Node = &*IT++;
+    this->removeNodeFromList(Node); // Notify traits that we removed a node...
+    base_list_type::remove(*Node);
+    return Node;
+  }
+
+  pointer remove(const iterator &IT) {
+    iterator MutIt = IT;
+    return remove(MutIt);
+  }
+
+  pointer remove(pointer IT) { return remove(iterator(IT)); }
+  pointer remove(reference IT) { return remove(iterator(IT)); }
+
+  // erase - remove a node from the controlled sequence... and delete it.
+  iterator erase(iterator where) {
+    this->deleteNode(remove(where));
+    return where;
+  }
+
+  iterator erase(pointer IT) { return erase(iterator(IT)); }
+  iterator erase(reference IT) { return erase(iterator(IT)); }
+
+  /// Remove all nodes from the list like clear(), but do not call
+  /// removeNodeFromList() or deleteNode().
+  ///
+  /// This should only be used immediately before freeing nodes in bulk to
+  /// avoid traversing the list and bringing all the nodes into cache.
+  void clearAndLeakNodesUnsafely() { base_list_type::clear(); }
+
+private:
+  // transfer - The heart of the splice function.  Move linked list nodes from
+  // [first, last) into position.
+  //
+  void transfer(iterator position, iplist_impl &L2, iterator first, iterator last) {
+    if (position == last)
+      return;
+
+    if (this != &L2) // Notify traits we moved the nodes...
+      this->transferNodesFromList(L2, first, last);
+
+    base_list_type::splice(position, L2, first, last);
+  }
+
+public:
+  //===----------------------------------------------------------------------===
+  // Functionality derived from other functions defined above...
+  //
+
+  using base_list_type::size;
+
+  iterator erase(iterator first, iterator last) {
+    while (first != last)
+      first = erase(first);
+    return last;
+  }
+
+  void clear() { erase(begin(), end()); }
+
+  // Front and back inserters...
+  void push_front(pointer val) { insert(begin(), val); }
+  void push_back(pointer val) { insert(end(), val); }
+  void pop_front() {
+    assert(!empty() && "pop_front() on empty list!");
+    erase(begin());
+  }
+  void pop_back() {
+    assert(!empty() && "pop_back() on empty list!");
+    iterator t = end(); erase(--t);
+  }
+
+  // Special forms of insert...
+  template<class InIt> void insert(iterator where, InIt first, InIt last) {
+    for (; first != last; ++first) insert(where, *first);
+  }
+
+  // Splice members - defined in terms of transfer...
+  void splice(iterator where, iplist_impl &L2) {
+    if (!L2.empty())
+      transfer(where, L2, L2.begin(), L2.end());
+  }
+  void splice(iterator where, iplist_impl &L2, iterator first) {
+    iterator last = first; ++last;
+    if (where == first || where == last) return; // No change
+    transfer(where, L2, first, last);
+  }
+  void splice(iterator where, iplist_impl &L2, iterator first, iterator last) {
+    if (first != last) transfer(where, L2, first, last);
+  }
+  void splice(iterator where, iplist_impl &L2, reference N) {
+    splice(where, L2, iterator(N));
+  }
+  void splice(iterator where, iplist_impl &L2, pointer N) {
+    splice(where, L2, iterator(N));
+  }
+
+  template <class Compare>
+  void merge(iplist_impl &Right, Compare comp) {
+    if (this == &Right)
+      return;
+    this->transferNodesFromList(Right, Right.begin(), Right.end());
+    base_list_type::merge(Right, comp);
+  }
+  void merge(iplist_impl &Right) { return merge(Right, op_less); }
+
+  using base_list_type::sort;
+
+  /// \brief Get the previous node, or \c nullptr for the list head.
+  pointer getPrevNode(reference N) const {
+    auto I = N.getIterator();
+    if (I == begin())
+      return nullptr;
+    return &*std::prev(I);
+  }
+  /// \brief Get the previous node, or \c nullptr for the list head.
+  const_pointer getPrevNode(const_reference N) const {
+    return getPrevNode(const_cast<reference>(N));
+  }
+
+  /// \brief Get the next node, or \c nullptr for the list tail.
+  pointer getNextNode(reference N) const {
+    auto Next = std::next(N.getIterator());
+    if (Next == end())
+      return nullptr;
+    return &*Next;
+  }
+  /// \brief Get the next node, or \c nullptr for the list tail.
+  const_pointer getNextNode(const_reference N) const {
+    return getNextNode(const_cast<reference>(N));
+  }
+};
+
+/// An intrusive list with ownership and callbacks specified/controlled by
+/// ilist_traits, only with API safe for polymorphic types.
+///
+/// The \p Options parameters are the same as those for \a simple_ilist.  See
+/// there for a description of what's available.
+template <class T, class... Options>
+class iplist
+    : public iplist_impl<simple_ilist<T, Options...>, ilist_traits<T>> {
+  typedef typename iplist::iplist_impl_type iplist_impl_type;
+
+public:
+  iplist() = default;
+
+  iplist(const iplist &X) = delete;
+  iplist &operator=(const iplist &X) = delete;
+
+  iplist(iplist &&X) : iplist_impl_type(std::move(X)) {}
+  iplist &operator=(iplist &&X) {
+    *static_cast<iplist_impl_type *>(this) = std::move(X);
+    return *this;
+  }
+};
+
+template <class T, class... Options> using ilist = iplist<T, Options...>;
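+
+// An ownership sketch (Instr is a hypothetical node type): iplist takes
+// ownership of heap-allocated nodes and deletes them through
+// ilist_alloc_traits on erase() and clear().
+//
+// \code
+//   struct Instr : ilist_node<Instr> {
+//     int Opcode = 0;
+//   };
+//
+//   void demo() {
+//     iplist<Instr> L;
+//     L.push_back(new Instr()); // The list now owns the node.
+//     L.push_back(new Instr());
+//     L.erase(L.begin());       // Unlinks and deletes the first node.
+//   }                           // ~iplist() deletes the remaining node.
+// \endcode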
+
+} // end namespace llvm
+
+namespace std {
+
+  // Ensure that swap uses the fast list swap...
+  template<class Ty>
+  void swap(llvm::iplist<Ty> &Left, llvm::iplist<Ty> &Right) {
+    Left.swap(Right);
+  }
+
+} // end namespace std
+
+#endif // LLVM_ADT_ILIST_H
diff --git a/linux-x64/clang/include/llvm/ADT/ilist_base.h b/linux-x64/clang/include/llvm/ADT/ilist_base.h
new file mode 100644
index 0000000..3d818a4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ilist_base.h
@@ -0,0 +1,93 @@
+//===- llvm/ADT/ilist_base.h - Intrusive List Base --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_BASE_H
+#define LLVM_ADT_ILIST_BASE_H
+
+#include "llvm/ADT/ilist_node_base.h"
+#include <cassert>
+
+namespace llvm {
+
+/// Implementations of list algorithms using ilist_node_base.
+template <bool EnableSentinelTracking> class ilist_base {
+public:
+  using node_base_type = ilist_node_base<EnableSentinelTracking>;
+
+  static void insertBeforeImpl(node_base_type &Next, node_base_type &N) {
+    node_base_type &Prev = *Next.getPrev();
+    N.setNext(&Next);
+    N.setPrev(&Prev);
+    Prev.setNext(&N);
+    Next.setPrev(&N);
+  }
+
+  static void removeImpl(node_base_type &N) {
+    node_base_type *Prev = N.getPrev();
+    node_base_type *Next = N.getNext();
+    Next->setPrev(Prev);
+    Prev->setNext(Next);
+
+    // Not strictly necessary, but helps catch a class of bugs.
+    N.setPrev(nullptr);
+    N.setNext(nullptr);
+  }
+
+  static void removeRangeImpl(node_base_type &First, node_base_type &Last) {
+    node_base_type *Prev = First.getPrev();
+    node_base_type *Final = Last.getPrev();
+    Last.setPrev(Prev);
+    Prev->setNext(&Last);
+
+    // Not strictly necessary, but helps catch a class of bugs.
+    First.setPrev(nullptr);
+    Final->setNext(nullptr);
+  }
+
+  static void transferBeforeImpl(node_base_type &Next, node_base_type &First,
+                                 node_base_type &Last) {
+    if (&Next == &Last || &First == &Last)
+      return;
+
+    // Position cannot be contained in the range to be transferred.
+    assert(&Next != &First &&
+           // Check for the most common mistake.
+           "Insertion point can't be one of the transferred nodes");
+
+    node_base_type &Final = *Last.getPrev();
+
+    // Detach from old list/position.
+    First.getPrev()->setNext(&Last);
+    Last.setPrev(First.getPrev());
+
+    // Splice [First, Final] into its new list/position.
+    node_base_type &Prev = *Next.getPrev();
+    Final.setNext(&Next);
+    First.setPrev(&Prev);
+    Prev.setNext(&First);
+    Next.setPrev(&Final);
+  }
+
+  template <class T> static void insertBefore(T &Next, T &N) {
+    insertBeforeImpl(Next, N);
+  }
+
+  template <class T> static void remove(T &N) { removeImpl(N); }
+  template <class T> static void removeRange(T &First, T &Last) {
+    removeRangeImpl(First, Last);
+  }
+
+  template <class T> static void transferBefore(T &Next, T &First, T &Last) {
+    transferBeforeImpl(Next, First, Last);
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_BASE_H
diff --git a/linux-x64/clang/include/llvm/ADT/ilist_iterator.h b/linux-x64/clang/include/llvm/ADT/ilist_iterator.h
new file mode 100644
index 0000000..671e644
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ilist_iterator.h
@@ -0,0 +1,199 @@
+//===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_ITERATOR_H
+#define LLVM_ADT_ILIST_ITERATOR_H
+
+#include "llvm/ADT/ilist_node.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+
+namespace llvm {
+
+namespace ilist_detail {
+
+/// Find const-correct node types.
+template <class OptionsT, bool IsConst> struct IteratorTraits;
+template <class OptionsT> struct IteratorTraits<OptionsT, false> {
+  using value_type = typename OptionsT::value_type;
+  using pointer = typename OptionsT::pointer;
+  using reference = typename OptionsT::reference;
+  using node_pointer = ilist_node_impl<OptionsT> *;
+  using node_reference = ilist_node_impl<OptionsT> &;
+};
+template <class OptionsT> struct IteratorTraits<OptionsT, true> {
+  using value_type = const typename OptionsT::value_type;
+  using pointer = typename OptionsT::const_pointer;
+  using reference = typename OptionsT::const_reference;
+  using node_pointer = const ilist_node_impl<OptionsT> *;
+  using node_reference = const ilist_node_impl<OptionsT> &;
+};
+
+template <bool IsReverse> struct IteratorHelper;
+template <> struct IteratorHelper<false> : ilist_detail::NodeAccess {
+  using Access = ilist_detail::NodeAccess;
+
+  template <class T> static void increment(T *&I) { I = Access::getNext(*I); }
+  template <class T> static void decrement(T *&I) { I = Access::getPrev(*I); }
+};
+template <> struct IteratorHelper<true> : ilist_detail::NodeAccess {
+  using Access = ilist_detail::NodeAccess;
+
+  template <class T> static void increment(T *&I) { I = Access::getPrev(*I); }
+  template <class T> static void decrement(T *&I) { I = Access::getNext(*I); }
+};
+
+} // end namespace ilist_detail
+
+/// Iterator for intrusive lists based on ilist_node.
+template <class OptionsT, bool IsReverse, bool IsConst>
+class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> {
+  friend ilist_iterator<OptionsT, IsReverse, !IsConst>;
+  friend ilist_iterator<OptionsT, !IsReverse, IsConst>;
+  friend ilist_iterator<OptionsT, !IsReverse, !IsConst>;
+
+  using Traits = ilist_detail::IteratorTraits<OptionsT, IsConst>;
+  using Access = ilist_detail::SpecificNodeAccess<OptionsT>;
+
+public:
+  using value_type = typename Traits::value_type;
+  using pointer = typename Traits::pointer;
+  using reference = typename Traits::reference;
+  using difference_type = ptrdiff_t;
+  using iterator_category = std::bidirectional_iterator_tag;
+  using const_pointer = typename OptionsT::const_pointer;
+  using const_reference = typename OptionsT::const_reference;
+
+private:
+  using node_pointer = typename Traits::node_pointer;
+  using node_reference = typename Traits::node_reference;
+
+  node_pointer NodePtr = nullptr;
+
+public:
+  /// Create from an ilist_node.
+  explicit ilist_iterator(node_reference N) : NodePtr(&N) {}
+
+  explicit ilist_iterator(pointer NP) : NodePtr(Access::getNodePtr(NP)) {}
+  explicit ilist_iterator(reference NR) : NodePtr(Access::getNodePtr(&NR)) {}
+  ilist_iterator() = default;
+
+  // This is templated so that we can allow constructing a const iterator from
+  // a nonconst iterator...
+  template <bool RHSIsConst>
+  ilist_iterator(
+      const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS,
+      typename std::enable_if<IsConst || !RHSIsConst, void *>::type = nullptr)
+      : NodePtr(RHS.NodePtr) {}
+
+  // This is templated so that we can allow assigning to a const iterator from
+  // a nonconst iterator...
+  template <bool RHSIsConst>
+  typename std::enable_if<IsConst || !RHSIsConst, ilist_iterator &>::type
+  operator=(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS) {
+    NodePtr = RHS.NodePtr;
+    return *this;
+  }
+
+  /// Explicit conversion between forward/reverse iterators.
+  ///
+  /// Translate between forward and reverse iterators without changing range
+  /// boundaries.  The resulting iterator will dereference (and have a handle)
+  /// to the previous node, which is somewhat unexpected; but converting the
+  /// two endpoints in a range will give the same range in reverse.
+  ///
+  /// This matches std::reverse_iterator conversions.
+  explicit ilist_iterator(
+      const ilist_iterator<OptionsT, !IsReverse, IsConst> &RHS)
+      : ilist_iterator(++RHS.getReverse()) {}
+
+  /// Get a reverse iterator to the same node.
+  ///
+  /// Gives a reverse iterator that will dereference (and have a handle) to the
+  /// same node.  Converting the endpoint iterators in a range will give a
+  /// different range; for range operations, use the explicit conversions.
+  ilist_iterator<OptionsT, !IsReverse, IsConst> getReverse() const {
+    if (NodePtr)
+      return ilist_iterator<OptionsT, !IsReverse, IsConst>(*NodePtr);
+    return ilist_iterator<OptionsT, !IsReverse, IsConst>();
+  }
+
+  /// Const-cast.
+  ilist_iterator<OptionsT, IsReverse, false> getNonConst() const {
+    if (NodePtr)
+      return ilist_iterator<OptionsT, IsReverse, false>(
+          const_cast<typename ilist_iterator<OptionsT, IsReverse,
+                                             false>::node_reference>(*NodePtr));
+    return ilist_iterator<OptionsT, IsReverse, false>();
+  }
+
+  // Accessors...
+  reference operator*() const {
+    assert(!NodePtr->isKnownSentinel());
+    return *Access::getValuePtr(NodePtr);
+  }
+  pointer operator->() const { return &operator*(); }
+
+  // Comparison operators
+  friend bool operator==(const ilist_iterator &LHS, const ilist_iterator &RHS) {
+    return LHS.NodePtr == RHS.NodePtr;
+  }
+  friend bool operator!=(const ilist_iterator &LHS, const ilist_iterator &RHS) {
+    return LHS.NodePtr != RHS.NodePtr;
+  }
+
+  // Increment and decrement operators...
+  ilist_iterator &operator--() {
+    NodePtr = IsReverse ? NodePtr->getNext() : NodePtr->getPrev();
+    return *this;
+  }
+  ilist_iterator &operator++() {
+    NodePtr = IsReverse ? NodePtr->getPrev() : NodePtr->getNext();
+    return *this;
+  }
+  ilist_iterator operator--(int) {
+    ilist_iterator tmp = *this;
+    --*this;
+    return tmp;
+  }
+  ilist_iterator operator++(int) {
+    ilist_iterator tmp = *this;
+    ++*this;
+    return tmp;
+  }
+
+  /// Get the underlying ilist_node.
+  node_pointer getNodePtr() const { return static_cast<node_pointer>(NodePtr); }
+
+  /// Check for end.  Only valid if ilist_sentinel_tracking<true>.
+  bool isEnd() const { return NodePtr ? NodePtr->isSentinel() : false; }
+};
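+
+// A conversion sketch: getReverse() keeps the referenced node, while the
+// explicit forward/reverse constructor shifts it by one, matching
+// std::reverse_iterator.  With a list {A, B, C} and a forward iterator It at
+// B:
+//
+// \code
+//   auto RIt = It.getReverse(); // Reverse iterator, still dereferences B.
+//   decltype(RIt) Shifted(It);  // Explicit conversion, dereferences A.
+//   assert(&*RIt == &*It);
+// \endcode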
+
+template <typename From> struct simplify_type;
+
+/// Allow ilist_iterators to convert into pointers to a node automatically when
+/// used by the dyn_cast, cast, isa mechanisms...
+///
+/// FIXME: remove this, since there is no implicit conversion to NodeTy.
+template <class OptionsT, bool IsConst>
+struct simplify_type<ilist_iterator<OptionsT, false, IsConst>> {
+  using iterator = ilist_iterator<OptionsT, false, IsConst>;
+  using SimpleType = typename iterator::pointer;
+
+  static SimpleType getSimplifiedValue(const iterator &Node) { return &*Node; }
+};
+template <class OptionsT, bool IsConst>
+struct simplify_type<const ilist_iterator<OptionsT, false, IsConst>>
+    : simplify_type<ilist_iterator<OptionsT, false, IsConst>> {};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_ITERATOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/ilist_node.h b/linux-x64/clang/include/llvm/ADT/ilist_node.h
new file mode 100644
index 0000000..3362611
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ilist_node.h
@@ -0,0 +1,306 @@
+//===- llvm/ADT/ilist_node.h - Intrusive Linked List Helper -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ilist_node class template, which is a convenient
+// base class for creating classes that can be used with ilists.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_NODE_H
+#define LLVM_ADT_ILIST_NODE_H
+
+#include "llvm/ADT/ilist_node_base.h"
+#include "llvm/ADT/ilist_node_options.h"
+
+namespace llvm {
+
+namespace ilist_detail {
+
+struct NodeAccess;
+
+} // end namespace ilist_detail
+
+template <class OptionsT, bool IsReverse, bool IsConst> class ilist_iterator;
+template <class OptionsT> class ilist_sentinel;
+
+/// Implementation for an ilist node.
+///
+/// Templated on an appropriate \a ilist_detail::node_options, usually computed
+/// by \a ilist_detail::compute_node_options.
+///
+/// This is a wrapper around \a ilist_node_base whose main purpose is to
+/// provide type safety: you can't insert nodes of \a ilist_node_impl into the
+/// wrong \a simple_ilist or \a iplist.
+template <class OptionsT> class ilist_node_impl : OptionsT::node_base_type {
+  using value_type = typename OptionsT::value_type;
+  using node_base_type = typename OptionsT::node_base_type;
+  using list_base_type = typename OptionsT::list_base_type;
+
+  friend typename OptionsT::list_base_type;
+  friend struct ilist_detail::NodeAccess;
+  friend class ilist_sentinel<OptionsT>;
+  friend class ilist_iterator<OptionsT, false, false>;
+  friend class ilist_iterator<OptionsT, false, true>;
+  friend class ilist_iterator<OptionsT, true, false>;
+  friend class ilist_iterator<OptionsT, true, true>;
+
+protected:
+  using self_iterator = ilist_iterator<OptionsT, false, false>;
+  using const_self_iterator = ilist_iterator<OptionsT, false, true>;
+  using reverse_self_iterator = ilist_iterator<OptionsT, true, false>;
+  using const_reverse_self_iterator = ilist_iterator<OptionsT, true, true>;
+
+  ilist_node_impl() = default;
+
+private:
+  ilist_node_impl *getPrev() {
+    return static_cast<ilist_node_impl *>(node_base_type::getPrev());
+  }
+
+  ilist_node_impl *getNext() {
+    return static_cast<ilist_node_impl *>(node_base_type::getNext());
+  }
+
+  const ilist_node_impl *getPrev() const {
+    return static_cast<ilist_node_impl *>(node_base_type::getPrev());
+  }
+
+  const ilist_node_impl *getNext() const {
+    return static_cast<ilist_node_impl *>(node_base_type::getNext());
+  }
+
+  void setPrev(ilist_node_impl *N) { node_base_type::setPrev(N); }
+  void setNext(ilist_node_impl *N) { node_base_type::setNext(N); }
+
+public:
+  self_iterator getIterator() { return self_iterator(*this); }
+  const_self_iterator getIterator() const { return const_self_iterator(*this); }
+
+  reverse_self_iterator getReverseIterator() {
+    return reverse_self_iterator(*this);
+  }
+
+  const_reverse_self_iterator getReverseIterator() const {
+    return const_reverse_self_iterator(*this);
+  }
+
+  // Under-approximation, but always available for assertions.
+  using node_base_type::isKnownSentinel;
+
+  /// Check whether this is the sentinel node.
+  ///
+  /// This requires sentinel tracking to be explicitly enabled.  Use the
+  /// ilist_sentinel_tracking<true> option to get this API.
+  bool isSentinel() const {
+    static_assert(OptionsT::is_sentinel_tracking_explicit,
+                  "Use ilist_sentinel_tracking<true> to enable isSentinel()");
+    return node_base_type::isSentinel();
+  }
+};
+
+/// An intrusive list node.
+///
+/// A base class to enable membership in intrusive lists, including \a
+/// simple_ilist, \a iplist, and \a ilist.  The first template parameter is the
+/// \a value_type for the list.
+///
+/// An ilist node can be configured with compile-time options to change
+/// behaviour and/or add API.
+///
+/// By default, an \a ilist_node knows whether it is the list sentinel (an
+/// instance of \a ilist_sentinel) if and only if
+/// LLVM_ENABLE_ABI_BREAKING_CHECKS.  The function \a isKnownSentinel() always
+/// returns \c false when tracking is off.  Sentinel tracking steals a bit
+/// from the "prev" link, which adds a mask operation when decrementing an
+/// iterator, but enables bug-finding assertions in \a ilist_iterator.
+///
+/// To turn sentinel tracking on all the time, pass in the
+/// ilist_sentinel_tracking<true> template parameter.  This also enables the \a
+/// isSentinel() function.  The same option must be passed to the intrusive
+/// list.  (ilist_sentinel_tracking<false> turns sentinel tracking off all the
+/// time.)
+///
+/// A type can inherit from ilist_node multiple times by passing in different
+/// \a ilist_tag options.  This allows a single instance to be inserted into
+/// multiple lists simultaneously, where each list is given the tag of the
+/// corresponding ilist_node base.
+///
+/// \example
+/// struct A {};
+/// struct B {};
+/// struct N : ilist_node<N, ilist_tag<A>>, ilist_node<N, ilist_tag<B>> {};
+///
+/// void foo() {
+///   simple_ilist<N, ilist_tag<A>> ListA;
+///   simple_ilist<N, ilist_tag<B>> ListB;
+///   N N1;
+///   ListA.push_back(N1);
+///   ListB.push_back(N1);
+/// }
+/// \endexample
+///
+/// See \a is_valid_option for steps on adding a new option.
+template <class T, class... Options>
+class ilist_node
+    : public ilist_node_impl<
+          typename ilist_detail::compute_node_options<T, Options...>::type> {
+  static_assert(ilist_detail::check_options<Options...>::value,
+                "Unrecognized node option!");
+};
+
+namespace ilist_detail {
+
+/// An access class for ilist_node private API.
+///
+/// This gives access to the private parts of ilist nodes.  Nodes for an ilist
+/// should friend this class if they inherit privately from ilist_node.
+///
+/// Using this class outside of the ilist implementation is unsupported.
+struct NodeAccess {
+protected:
+  template <class OptionsT>
+  static ilist_node_impl<OptionsT> *getNodePtr(typename OptionsT::pointer N) {
+    return N;
+  }
+
+  template <class OptionsT>
+  static const ilist_node_impl<OptionsT> *
+  getNodePtr(typename OptionsT::const_pointer N) {
+    return N;
+  }
+
+  template <class OptionsT>
+  static typename OptionsT::pointer getValuePtr(ilist_node_impl<OptionsT> *N) {
+    return static_cast<typename OptionsT::pointer>(N);
+  }
+
+  template <class OptionsT>
+  static typename OptionsT::const_pointer
+  getValuePtr(const ilist_node_impl<OptionsT> *N) {
+    return static_cast<typename OptionsT::const_pointer>(N);
+  }
+
+  template <class OptionsT>
+  static ilist_node_impl<OptionsT> *getPrev(ilist_node_impl<OptionsT> &N) {
+    return N.getPrev();
+  }
+
+  template <class OptionsT>
+  static ilist_node_impl<OptionsT> *getNext(ilist_node_impl<OptionsT> &N) {
+    return N.getNext();
+  }
+
+  template <class OptionsT>
+  static const ilist_node_impl<OptionsT> *
+  getPrev(const ilist_node_impl<OptionsT> &N) {
+    return N.getPrev();
+  }
+
+  template <class OptionsT>
+  static const ilist_node_impl<OptionsT> *
+  getNext(const ilist_node_impl<OptionsT> &N) {
+    return N.getNext();
+  }
+};
+
+template <class OptionsT> struct SpecificNodeAccess : NodeAccess {
+protected:
+  using pointer = typename OptionsT::pointer;
+  using const_pointer = typename OptionsT::const_pointer;
+  using node_type = ilist_node_impl<OptionsT>;
+
+  static node_type *getNodePtr(pointer N) {
+    return NodeAccess::getNodePtr<OptionsT>(N);
+  }
+
+  static const node_type *getNodePtr(const_pointer N) {
+    return NodeAccess::getNodePtr<OptionsT>(N);
+  }
+
+  static pointer getValuePtr(node_type *N) {
+    return NodeAccess::getValuePtr<OptionsT>(N);
+  }
+
+  static const_pointer getValuePtr(const node_type *N) {
+    return NodeAccess::getValuePtr<OptionsT>(N);
+  }
+};
+
+} // end namespace ilist_detail
+
+template <class OptionsT>
+class ilist_sentinel : public ilist_node_impl<OptionsT> {
+public:
+  ilist_sentinel() {
+    this->initializeSentinel();
+    reset();
+  }
+
+  void reset() {
+    this->setPrev(this);
+    this->setNext(this);
+  }
+
+  bool empty() const { return this == this->getPrev(); }
+};
+
+/// An ilist node that can access its parent list.
+///
+/// Requires \c NodeTy to have \a getParent() to find the parent node, and the
+/// \c ParentTy to have \a getSublistAccess() to get a reference to the list.
+template <typename NodeTy, typename ParentTy, class... Options>
+class ilist_node_with_parent : public ilist_node<NodeTy, Options...> {
+protected:
+  ilist_node_with_parent() = default;
+
+private:
+  /// Forward to NodeTy::getParent().
+  ///
+  /// Note: do not use the name "getParent()".  We want a compile error
+  /// (instead of recursion) when the subclass fails to implement \a
+  /// getParent().
+  const ParentTy *getNodeParent() const {
+    return static_cast<const NodeTy *>(this)->getParent();
+  }
+
+public:
+  /// @name Adjacent Node Accessors
+  /// @{
+  /// \brief Get the previous node, or \c nullptr for the list head.
+  NodeTy *getPrevNode() {
+    // Should be factored into a reusable function, but then we couldn't use
+    // auto (and would need the type of the list).
+    const auto &List =
+        getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
+    return List.getPrevNode(*static_cast<NodeTy *>(this));
+  }
+
+  /// \brief Get the previous node, or \c nullptr for the list head.
+  const NodeTy *getPrevNode() const {
+    return const_cast<ilist_node_with_parent *>(this)->getPrevNode();
+  }
+
+  /// \brief Get the next node, or \c nullptr for the list tail.
+  NodeTy *getNextNode() {
+    // Should be factored into a reusable function, but then we couldn't use
+    // auto (and would need the type of the list).
+    const auto &List =
+        getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
+    return List.getNextNode(*static_cast<NodeTy *>(this));
+  }
+
+  /// \brief Get the next node, or \c nullptr for the list tail.
+  const NodeTy *getNextNode() const {
+    return const_cast<ilist_node_with_parent *>(this)->getNextNode();
+  }
+  /// @}
+};
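+
+// A contract sketch (Fn and Block are hypothetical types): the node exposes
+// getParent(), and the parent exposes the sublist as a pointer-to-member via
+// getSublistAccess().
+//
+// \code
+//   struct Fn;
+//   struct Block : ilist_node_with_parent<Block, Fn> {
+//     Fn *Parent = nullptr;
+//     Fn *getParent() const { return Parent; }
+//   };
+//   struct Fn {
+//     iplist<Block> Blocks;
+//     static iplist<Block> Fn::*getSublistAccess(Block *) {
+//       return &Fn::Blocks;
+//     }
+//   };
+//   // Now Block::getNextNode()/getPrevNode() walk Fn::Blocks.
+// \endcode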
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_NODE_H
diff --git a/linux-x64/clang/include/llvm/ADT/ilist_node_base.h b/linux-x64/clang/include/llvm/ADT/ilist_node_base.h
new file mode 100644
index 0000000..e5062ac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ilist_node_base.h
@@ -0,0 +1,53 @@
+//===- llvm/ADT/ilist_node_base.h - Intrusive List Node Base -----*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_NODE_BASE_H
+#define LLVM_ADT_ILIST_NODE_BASE_H
+
+#include "llvm/ADT/PointerIntPair.h"
+
+namespace llvm {
+
+/// Base class for ilist nodes.
+///
+/// Optionally tracks whether this node is the sentinel.
+template <bool EnableSentinelTracking> class ilist_node_base;
+
+template <> class ilist_node_base<false> {
+  ilist_node_base *Prev = nullptr;
+  ilist_node_base *Next = nullptr;
+
+public:
+  void setPrev(ilist_node_base *Prev) { this->Prev = Prev; }
+  void setNext(ilist_node_base *Next) { this->Next = Next; }
+  ilist_node_base *getPrev() const { return Prev; }
+  ilist_node_base *getNext() const { return Next; }
+
+  bool isKnownSentinel() const { return false; }
+  void initializeSentinel() {}
+};
+
+template <> class ilist_node_base<true> {
+  PointerIntPair<ilist_node_base *, 1> PrevAndSentinel;
+  ilist_node_base *Next = nullptr;
+
+public:
+  void setPrev(ilist_node_base *Prev) { PrevAndSentinel.setPointer(Prev); }
+  void setNext(ilist_node_base *Next) { this->Next = Next; }
+  ilist_node_base *getPrev() const { return PrevAndSentinel.getPointer(); }
+  ilist_node_base *getNext() const { return Next; }
+
+  bool isSentinel() const { return PrevAndSentinel.getInt(); }
+  bool isKnownSentinel() const { return isSentinel(); }
+  void initializeSentinel() { PrevAndSentinel.setInt(true); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_NODE_BASE_H
diff --git a/linux-x64/clang/include/llvm/ADT/ilist_node_options.h b/linux-x64/clang/include/llvm/ADT/ilist_node_options.h
new file mode 100644
index 0000000..c33df1e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/ilist_node_options.h
@@ -0,0 +1,133 @@
+//===- llvm/ADT/ilist_node_options.h - ilist_node Options -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_NODE_OPTIONS_H
+#define LLVM_ADT_ILIST_NODE_OPTIONS_H
+
+#include "llvm/Config/abi-breaking.h"
+#include "llvm/Config/llvm-config.h"
+
+#include <type_traits>
+
+namespace llvm {
+
+template <bool EnableSentinelTracking> class ilist_node_base;
+template <bool EnableSentinelTracking> class ilist_base;
+
+/// Option to choose whether to track sentinels.
+///
+/// This option affects the ABI for the nodes.  When not specified explicitly,
+/// the ABI depends on LLVM_ENABLE_ABI_BREAKING_CHECKS.  Specify explicitly to
+/// enable \a ilist_node::isSentinel().
+template <bool EnableSentinelTracking> struct ilist_sentinel_tracking {};
+
+/// Option to specify a tag for the node type.
+///
+/// This option allows a single value type to be inserted in multiple lists
+/// simultaneously.  See \a ilist_node for usage examples.
+template <class Tag> struct ilist_tag {};
+
+namespace ilist_detail {
+
+/// Helper trait for recording whether an option is specified explicitly.
+template <bool IsExplicit> struct explicitness {
+  static const bool is_explicit = IsExplicit;
+};
+typedef explicitness<true> is_explicit;
+typedef explicitness<false> is_implicit;
+
+/// Check whether an option is valid.
+///
+/// The steps for adding and enabling a new ilist option include:
+/// \li define the option, ilist_foo<Bar>, above;
+/// \li add new parameters for Bar to \a ilist_detail::node_options;
+/// \li add an extraction meta-function, ilist_detail::extract_foo;
+/// \li call extract_foo from \a ilist_detail::compute_node_options and pass it
+/// into \a ilist_detail::node_options; and
+/// \li specialize \c is_valid_option<ilist_foo<Bar>> to inherit from \c
+/// std::true_type to get static assertions passing in \a simple_ilist and \a
+/// ilist_node.
+template <class Option> struct is_valid_option : std::false_type {};
+
+/// Extract sentinel tracking option.
+///
+/// Look through \p Options for the \a ilist_sentinel_tracking option, with the
+/// default depending on LLVM_ENABLE_ABI_BREAKING_CHECKS.
+template <class... Options> struct extract_sentinel_tracking;
+template <bool EnableSentinelTracking, class... Options>
+struct extract_sentinel_tracking<
+    ilist_sentinel_tracking<EnableSentinelTracking>, Options...>
+    : std::integral_constant<bool, EnableSentinelTracking>, is_explicit {};
+template <class Option1, class... Options>
+struct extract_sentinel_tracking<Option1, Options...>
+    : extract_sentinel_tracking<Options...> {};
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+template <> struct extract_sentinel_tracking<> : std::true_type, is_implicit {};
+#else
+template <>
+struct extract_sentinel_tracking<> : std::false_type, is_implicit {};
+#endif
+template <bool EnableSentinelTracking>
+struct is_valid_option<ilist_sentinel_tracking<EnableSentinelTracking>>
+    : std::true_type {};
+
+/// Extract custom tag option.
+///
+/// Look through \p Options for the \a ilist_tag option, pulling out the
+/// custom tag type, using void as a default.
+template <class... Options> struct extract_tag;
+template <class Tag, class... Options>
+struct extract_tag<ilist_tag<Tag>, Options...> {
+  typedef Tag type;
+};
+template <class Option1, class... Options>
+struct extract_tag<Option1, Options...> : extract_tag<Options...> {};
+template <> struct extract_tag<> { typedef void type; };
+template <class Tag> struct is_valid_option<ilist_tag<Tag>> : std::true_type {};
+
+/// Check whether options are valid.
+///
+/// The conjunction of \a is_valid_option on each individual option.
+template <class... Options> struct check_options;
+template <> struct check_options<> : std::true_type {};
+template <class Option1, class... Options>
+struct check_options<Option1, Options...>
+    : std::integral_constant<bool, is_valid_option<Option1>::value &&
+                                       check_options<Options...>::value> {};
+
+/// Traits for options for \a ilist_node.
+///
+/// This is usually computed via \a compute_node_options.
+template <class T, bool EnableSentinelTracking, bool IsSentinelTrackingExplicit,
+          class TagT>
+struct node_options {
+  typedef T value_type;
+  typedef T *pointer;
+  typedef T &reference;
+  typedef const T *const_pointer;
+  typedef const T &const_reference;
+
+  static const bool enable_sentinel_tracking = EnableSentinelTracking;
+  static const bool is_sentinel_tracking_explicit = IsSentinelTrackingExplicit;
+  typedef TagT tag;
+  typedef ilist_node_base<enable_sentinel_tracking> node_base_type;
+  typedef ilist_base<enable_sentinel_tracking> list_base_type;
+};
+
+template <class T, class... Options> struct compute_node_options {
+  typedef node_options<T, extract_sentinel_tracking<Options...>::value,
+                       extract_sentinel_tracking<Options...>::is_explicit,
+                       typename extract_tag<Options...>::type>
+      type;
+};
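+
+// An extraction sketch (TagA is a hypothetical tag type): options are scanned
+// left to right, falling back to defaults when absent.
+//
+// \code
+//   struct TagA {};
+//   using Opts = compute_node_options<int, ilist_tag<TagA>>::type;
+//   static_assert(std::is_same<Opts::tag, TagA>::value, "tag extracted");
+//   static_assert(!Opts::is_sentinel_tracking_explicit,
+//                 "sentinel tracking left implicit");
+// \endcode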
+
+} // end namespace ilist_detail
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_NODE_OPTIONS_H
diff --git a/linux-x64/clang/include/llvm/ADT/iterator.h b/linux-x64/clang/include/llvm/ADT/iterator.h
new file mode 100644
index 0000000..711f8f2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/iterator.h
@@ -0,0 +1,339 @@
+//===- iterator.h - Utilities for using and defining iterators --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ITERATOR_H
+#define LLVM_ADT_ITERATOR_H
+
+#include "llvm/ADT/iterator_range.h"
+#include <algorithm>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+/// \brief CRTP base class which implements the entire standard iterator facade
+/// in terms of a minimal subset of the interface.
+///
+/// Use this when it is reasonable to implement most of the iterator
+/// functionality in terms of a core subset. If you need special behavior or
+/// there are performance implications for this, you may want to override the
+/// relevant members instead.
+///
+/// Note, one abstraction that this does *not* provide is implementing
+/// subtraction in terms of addition by negating the difference. Negation isn't
+/// always information preserving, and I can see very reasonable iterator
+/// designs where this doesn't work well. It doesn't really force much added
+/// boilerplate anyway.
+///
+/// Another abstraction that this doesn't provide is implementing increment in
+/// terms of addition of one. These aren't equivalent for all iterator
+/// categories, and respecting that adds a lot of complexity for little gain.
+///
+/// Classes wishing to use `iterator_facade_base` should implement the following
+/// methods:
+///
+/// Forward Iterators:
+///   (All of the following methods)
+///   - DerivedT &operator=(const DerivedT &R);
+///   - bool operator==(const DerivedT &R) const;
+///   - const T &operator*() const;
+///   - T &operator*();
+///   - DerivedT &operator++();
+///
+/// Bidirectional Iterators:
+///   (All methods of forward iterators, plus the following)
+///   - DerivedT &operator--();
+///
+/// Random-access Iterators:
+///   (All methods of bidirectional iterators excluding the following)
+///   - DerivedT &operator++();
+///   - DerivedT &operator--();
+///   (plus the following)
+///   - bool operator<(const DerivedT &RHS) const;
+///   - DifferenceTypeT operator-(const DerivedT &R) const;
+///   - DerivedT &operator+=(DifferenceTypeT N);
+///   - DerivedT &operator-=(DifferenceTypeT N);
+///
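+///
+/// A minimal sketch (names illustrative) of a random-access iterator over a
+/// buffer of ints built on this facade:
+///
+/// \code
+///   struct int_iter
+///       : iterator_facade_base<int_iter, std::random_access_iterator_tag,
+///                              int> {
+///     int *P = nullptr;
+///     bool operator==(const int_iter &R) const { return P == R.P; }
+///     bool operator<(const int_iter &R) const { return P < R.P; }
+///     int &operator*() const { return *P; }
+///     std::ptrdiff_t operator-(const int_iter &R) const { return P - R.P; }
+///     int_iter &operator+=(std::ptrdiff_t N) { P += N; return *this; }
+///     int_iter &operator-=(std::ptrdiff_t N) { P -= N; return *this; }
+///   };
+/// \endcode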
+template <typename DerivedT, typename IteratorCategoryT, typename T,
+          typename DifferenceTypeT = std::ptrdiff_t, typename PointerT = T *,
+          typename ReferenceT = T &>
+class iterator_facade_base
+    : public std::iterator<IteratorCategoryT, T, DifferenceTypeT, PointerT,
+                           ReferenceT> {
+protected:
+  enum {
+    IsRandomAccess = std::is_base_of<std::random_access_iterator_tag,
+                                     IteratorCategoryT>::value,
+    IsBidirectional = std::is_base_of<std::bidirectional_iterator_tag,
+                                      IteratorCategoryT>::value,
+  };
+
+  /// A proxy object for computing a reference by dereferencing a copy of an
+  /// iterator. This is used in APIs which need to produce a reference via
+  /// indirection but for which the iterator object might be a temporary. The
+  /// proxy preserves the iterator internally and exposes the dereferenced
+  /// reference via a conversion operator.
+  class ReferenceProxy {
+    friend iterator_facade_base;
+
+    DerivedT I;
+
+    ReferenceProxy(DerivedT I) : I(std::move(I)) {}
+
+  public:
+    operator ReferenceT() const { return *I; }
+  };
+
+public:
+  DerivedT operator+(DifferenceTypeT n) const {
+    static_assert(std::is_base_of<iterator_facade_base, DerivedT>::value,
+                  "Must pass the derived type to this template!");
+    static_assert(
+        IsRandomAccess,
+        "The '+' operator is only defined for random access iterators.");
+    DerivedT tmp = *static_cast<const DerivedT *>(this);
+    tmp += n;
+    return tmp;
+  }
+  friend DerivedT operator+(DifferenceTypeT n, const DerivedT &i) {
+    static_assert(
+        IsRandomAccess,
+        "The '+' operator is only defined for random access iterators.");
+    return i + n;
+  }
+  DerivedT operator-(DifferenceTypeT n) const {
+    static_assert(
+        IsRandomAccess,
+        "The '-' operator is only defined for random access iterators.");
+    DerivedT tmp = *static_cast<const DerivedT *>(this);
+    tmp -= n;
+    return tmp;
+  }
+
+  DerivedT &operator++() {
+    static_assert(std::is_base_of<iterator_facade_base, DerivedT>::value,
+                  "Must pass the derived type to this template!");
+    return static_cast<DerivedT *>(this)->operator+=(1);
+  }
+  DerivedT operator++(int) {
+    DerivedT tmp = *static_cast<DerivedT *>(this);
+    ++*static_cast<DerivedT *>(this);
+    return tmp;
+  }
+  DerivedT &operator--() {
+    static_assert(
+        IsBidirectional,
+        "The decrement operator is only defined for bidirectional iterators.");
+    return static_cast<DerivedT *>(this)->operator-=(1);
+  }
+  DerivedT operator--(int) {
+    static_assert(
+        IsBidirectional,
+        "The decrement operator is only defined for bidirectional iterators.");
+    DerivedT tmp = *static_cast<DerivedT *>(this);
+    --*static_cast<DerivedT *>(this);
+    return tmp;
+  }
+
+  bool operator!=(const DerivedT &RHS) const {
+    return !static_cast<const DerivedT *>(this)->operator==(RHS);
+  }
+
+  bool operator>(const DerivedT &RHS) const {
+    static_assert(
+        IsRandomAccess,
+        "Relational operators are only defined for random access iterators.");
+    return !static_cast<const DerivedT *>(this)->operator<(RHS) &&
+           !static_cast<const DerivedT *>(this)->operator==(RHS);
+  }
+  bool operator<=(const DerivedT &RHS) const {
+    static_assert(
+        IsRandomAccess,
+        "Relational operators are only defined for random access iterators.");
+    return !static_cast<const DerivedT *>(this)->operator>(RHS);
+  }
+  bool operator>=(const DerivedT &RHS) const {
+    static_assert(
+        IsRandomAccess,
+        "Relational operators are only defined for random access iterators.");
+    return !static_cast<const DerivedT *>(this)->operator<(RHS);
+  }
+
+  PointerT operator->() { return &static_cast<DerivedT *>(this)->operator*(); }
+  PointerT operator->() const {
+    return &static_cast<const DerivedT *>(this)->operator*();
+  }
+  ReferenceProxy operator[](DifferenceTypeT n) {
+    static_assert(IsRandomAccess,
+                  "Subscripting is only defined for random access iterators.");
+    return ReferenceProxy(static_cast<DerivedT *>(this)->operator+(n));
+  }
+  ReferenceProxy operator[](DifferenceTypeT n) const {
+    static_assert(IsRandomAccess,
+                  "Subscripting is only defined for random access iterators.");
+    return ReferenceProxy(static_cast<const DerivedT *>(this)->operator+(n));
+  }
+};
+
+/// \brief CRTP base class for adapting an iterator to a different type.
+///
+/// This class can be used through CRTP to adapt one iterator into another.
+/// Typically this is done through providing in the derived class a custom \c
+/// operator* implementation. Other methods can be overridden as well.
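+///
+/// For illustration only (assuming the usual standard headers), a hypothetical
+/// adaptor that walks the keys of a sequence of pairs by overriding \c
+/// operator* might look like:
+///
+/// \code
+///   using PairIt = std::vector<std::pair<int, std::string>>::iterator;
+///   struct key_iterator
+///       : iterator_adaptor_base<key_iterator, PairIt,
+///                               std::random_access_iterator_tag, int> {
+///     key_iterator() = default;
+///     explicit key_iterator(PairIt I)
+///         : key_iterator::iterator_adaptor_base(std::move(I)) {}
+///     int &operator*() const { return this->I->first; }
+///   };
+/// \endcode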
+template <
+    typename DerivedT, typename WrappedIteratorT,
+    typename IteratorCategoryT =
+        typename std::iterator_traits<WrappedIteratorT>::iterator_category,
+    typename T = typename std::iterator_traits<WrappedIteratorT>::value_type,
+    typename DifferenceTypeT =
+        typename std::iterator_traits<WrappedIteratorT>::difference_type,
+    typename PointerT = typename std::conditional<
+        std::is_same<T, typename std::iterator_traits<
+                            WrappedIteratorT>::value_type>::value,
+        typename std::iterator_traits<WrappedIteratorT>::pointer, T *>::type,
+    typename ReferenceT = typename std::conditional<
+        std::is_same<T, typename std::iterator_traits<
+                            WrappedIteratorT>::value_type>::value,
+        typename std::iterator_traits<WrappedIteratorT>::reference, T &>::type,
+    // Don't provide these; they mostly exist to act as aliases below.
+    typename WrappedTraitsT = std::iterator_traits<WrappedIteratorT>>
+class iterator_adaptor_base
+    : public iterator_facade_base<DerivedT, IteratorCategoryT, T,
+                                  DifferenceTypeT, PointerT, ReferenceT> {
+  using BaseT = typename iterator_adaptor_base::iterator_facade_base;
+
+protected:
+  WrappedIteratorT I;
+
+  iterator_adaptor_base() = default;
+
+  explicit iterator_adaptor_base(WrappedIteratorT u) : I(std::move(u)) {
+    static_assert(std::is_base_of<iterator_adaptor_base, DerivedT>::value,
+                  "Must pass the derived type to this template!");
+  }
+
+  const WrappedIteratorT &wrapped() const { return I; }
+
+public:
+  using difference_type = DifferenceTypeT;
+
+  DerivedT &operator+=(difference_type n) {
+    static_assert(
+        BaseT::IsRandomAccess,
+        "The '+=' operator is only defined for random access iterators.");
+    I += n;
+    return *static_cast<DerivedT *>(this);
+  }
+  DerivedT &operator-=(difference_type n) {
+    static_assert(
+        BaseT::IsRandomAccess,
+        "The '-=' operator is only defined for random access iterators.");
+    I -= n;
+    return *static_cast<DerivedT *>(this);
+  }
+  using BaseT::operator-;
+  difference_type operator-(const DerivedT &RHS) const {
+    static_assert(
+        BaseT::IsRandomAccess,
+        "The '-' operator is only defined for random access iterators.");
+    return I - RHS.I;
+  }
+
+  // We have to explicitly provide ++ and -- rather than letting the facade
+  // forward to += because WrappedIteratorT might not support +=.
+  using BaseT::operator++;
+  DerivedT &operator++() {
+    ++I;
+    return *static_cast<DerivedT *>(this);
+  }
+  using BaseT::operator--;
+  DerivedT &operator--() {
+    static_assert(
+        BaseT::IsBidirectional,
+        "The decrement operator is only defined for bidirectional iterators.");
+    --I;
+    return *static_cast<DerivedT *>(this);
+  }
+
+  bool operator==(const DerivedT &RHS) const { return I == RHS.I; }
+  bool operator<(const DerivedT &RHS) const {
+    static_assert(
+        BaseT::IsRandomAccess,
+        "Relational operators are only defined for random access iterators.");
+    return I < RHS.I;
+  }
+
+  ReferenceT operator*() const { return *I; }
+};
+
+/// \brief An iterator type that allows iterating over the pointees via some
+/// other iterator.
+///
+/// The typical usage of this is to expose a type that iterates over Ts, but
+/// which is implemented with some iterator over T*s:
+///
+/// \code
+///   using iterator = pointee_iterator<SmallVectorImpl<T *>::iterator>;
+/// \endcode
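+///
+/// For illustration, a hypothetical use with \a make_pointee_range() (defined
+/// below), assuming the usual standard headers:
+///
+/// \code
+///   int A = 1, B = 2;
+///   std::vector<int *> Ptrs = {&A, &B};
+///   for (int &V : make_pointee_range(Ptrs)) // visits ints, not int*s
+///     V += 1;                               // A == 2, B == 3 afterwards
+/// \endcode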
+template <typename WrappedIteratorT,
+          typename T = typename std::remove_reference<
+              decltype(**std::declval<WrappedIteratorT>())>::type>
+struct pointee_iterator
+    : iterator_adaptor_base<
+          pointee_iterator<WrappedIteratorT>, WrappedIteratorT,
+          typename std::iterator_traits<WrappedIteratorT>::iterator_category,
+          T> {
+  pointee_iterator() = default;
+  template <typename U>
+  pointee_iterator(U &&u)
+      : pointee_iterator::iterator_adaptor_base(std::forward<U &&>(u)) {}
+
+  T &operator*() const { return **this->I; }
+};
+
+template <typename RangeT, typename WrappedIteratorT =
+                               decltype(std::begin(std::declval<RangeT>()))>
+iterator_range<pointee_iterator<WrappedIteratorT>>
+make_pointee_range(RangeT &&Range) {
+  using PointeeIteratorT = pointee_iterator<WrappedIteratorT>;
+  return make_range(PointeeIteratorT(std::begin(std::forward<RangeT>(Range))),
+                    PointeeIteratorT(std::end(std::forward<RangeT>(Range))));
+}
+
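+/// An iterator adaptor producing pointers to a wrapped iterator's elements:
+/// roughly the inverse of \a pointee_iterator.  Dereferencing yields the
+/// address of the wrapped iterator's current element.
+///
+/// For illustration, a hypothetical use via \a make_pointer_range() (defined
+/// below):
+///
+/// \code
+///   std::vector<int> Ints = {1, 2, 3};
+///   for (int *P : make_pointer_range(Ints))
+///     *P *= 2; // Ints == {2, 4, 6}
+/// \endcode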
+template <typename WrappedIteratorT,
+          typename T = decltype(&*std::declval<WrappedIteratorT>())>
+class pointer_iterator
+    : public iterator_adaptor_base<
+          pointer_iterator<WrappedIteratorT, T>, WrappedIteratorT,
+          typename std::iterator_traits<WrappedIteratorT>::iterator_category,
+          T> {
+  mutable T Ptr;
+
+public:
+  pointer_iterator() = default;
+
+  explicit pointer_iterator(WrappedIteratorT u)
+      : pointer_iterator::iterator_adaptor_base(std::move(u)) {}
+
+  T &operator*() { return Ptr = &*this->I; }
+  const T &operator*() const { return Ptr = &*this->I; }
+};
+
+template <typename RangeT, typename WrappedIteratorT =
+                               decltype(std::begin(std::declval<RangeT>()))>
+iterator_range<pointer_iterator<WrappedIteratorT>>
+make_pointer_range(RangeT &&Range) {
+  using PointerIteratorT = pointer_iterator<WrappedIteratorT>;
+  return make_range(PointerIteratorT(std::begin(std::forward<RangeT>(Range))),
+                    PointerIteratorT(std::end(std::forward<RangeT>(Range))));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ITERATOR_H
diff --git a/linux-x64/clang/include/llvm/ADT/iterator_range.h b/linux-x64/clang/include/llvm/ADT/iterator_range.h
new file mode 100644
index 0000000..3cbf619
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/iterator_range.h
@@ -0,0 +1,68 @@
+//===- iterator_range.h - A range adaptor for iterators ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This provides a very simple, boring adaptor for a begin and end iterator
+/// into a range type. This should be used to build range views that work well
+/// with range based for loops and range based constructors.
+///
+/// Note that code here follows more standards-based coding conventions as it
+/// is mirroring proposed interfaces for standardization.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ITERATOR_RANGE_H
+#define LLVM_ADT_ITERATOR_RANGE_H
+
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+/// \brief A range adaptor for a pair of iterators.
+///
+/// This just wraps two iterators into a range-compatible interface. Nothing
+/// fancy at all.
+template <typename IteratorT>
+class iterator_range {
+  IteratorT begin_iterator, end_iterator;
+
+public:
+  // TODO: Add SFINAE to test that the Container's iterators match the range's
+  //       iterators.
+  // TODO: Consider ADL/non-member begin/end calls.
+  template <typename Container>
+  iterator_range(Container &&c)
+      : begin_iterator(c.begin()), end_iterator(c.end()) {}
+  iterator_range(IteratorT begin_iterator, IteratorT end_iterator)
+      : begin_iterator(std::move(begin_iterator)),
+        end_iterator(std::move(end_iterator)) {}
+
+  IteratorT begin() const { return begin_iterator; }
+  IteratorT end() const { return end_iterator; }
+};
+
+/// \brief Convenience function for iterating over sub-ranges.
+///
+/// This provides a bit of syntactic sugar to make using sub-ranges
+/// in for loops a bit easier. Analogous to std::make_pair().
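+///
+/// For illustration:
+///
+/// \code
+///   std::vector<int> V = {1, 2, 3, 4};
+///   int Sum = 0;
+///   for (int I : make_range(V.begin() + 1, V.end())) // skips V[0]
+///     Sum += I;                                      // Sum == 9
+/// \endcode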
+template <class T> iterator_range<T> make_range(T x, T y) {
+  return iterator_range<T>(std::move(x), std::move(y));
+}
+
+template <typename T> iterator_range<T> make_range(std::pair<T, T> p) {
+  return iterator_range<T>(std::move(p.first), std::move(p.second));
+}
+
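+/// Return a range over \p t that skips its first \p n elements.  For
+/// illustration:
+///
+/// \code
+///   std::vector<int> V = {1, 2, 3};
+///   int Sum = 0;
+///   for (int I : drop_begin(V, 1)) // visits 2 and 3
+///     Sum += I;                    // Sum == 5
+/// \endcode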
+template<typename T>
+iterator_range<decltype(begin(std::declval<T>()))> drop_begin(T &&t, int n) {
+  return make_range(std::next(begin(t), n), end(t));
+}
+} // end namespace llvm
+
+#endif // LLVM_ADT_ITERATOR_RANGE_H
diff --git a/linux-x64/clang/include/llvm/ADT/simple_ilist.h b/linux-x64/clang/include/llvm/ADT/simple_ilist.h
new file mode 100644
index 0000000..4c7598a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/simple_ilist.h
@@ -0,0 +1,315 @@
+//===- llvm/ADT/simple_ilist.h - Simple Intrusive List ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SIMPLE_ILIST_H
+#define LLVM_ADT_SIMPLE_ILIST_H
+
+#include "llvm/ADT/ilist_base.h"
+#include "llvm/ADT/ilist_iterator.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/ilist_node_options.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <functional>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+/// A simple intrusive list implementation.
+///
+/// This is a simple intrusive list for a \c T that inherits from \c
+/// ilist_node<T>.  The list never takes ownership of anything inserted in it.
+///
+/// Unlike \a iplist<T> and \a ilist<T>, \a simple_ilist<T> never allocates or
+/// deletes values, and has no callback traits.
+///
+/// The APIs for adding nodes include \a push_front(), \a push_back(), and \a
+/// insert().  These all take values by reference (not by pointer), except for
+/// the range version of \a insert().
+///
+/// There are three APIs for discarding nodes from the list: \a
+/// remove(), which takes a reference to the node to remove, \a erase(), which
+/// takes an iterator or iterator range and returns the next one, and \a
+/// clear(), which empties out the container.  All three are constant time
+/// operations.  None of these deletes any nodes; in particular, if there is a
+/// single node in the list, then these have identical semantics:
+/// \li \c L.remove(L.front());
+/// \li \c L.erase(L.begin());
+/// \li \c L.clear();
+///
+/// As a convenience for callers, there are parallel APIs that take a \c
+/// Disposer (such as \c std::default_delete<T>): \a removeAndDispose(), \a
+/// eraseAndDispose(), and \a clearAndDispose().  These have different names
+/// because the extra semantic is otherwise non-obvious.  They are equivalent
+/// to calling \a std::for_each() on the range to be discarded.
+///
+/// The currently available \p Options customize the nodes in the list.  The
+/// same options must be specified in the \a ilist_node instantation for
+/// compatibility (although the order is irrelevant).
+/// \li Use \a ilist_tag to designate which ilist_node for a given \p T this
+/// list should use.  This is useful if a type \p T is part of multiple,
+/// independent lists simultaneously.
+/// \li Use \a ilist_sentinel_tracking to always (or never) track whether a
+/// node is a sentinel.  Specifying \c true enables the \a
+/// ilist_node::isSentinel() API.  Unlike \a ilist_node::isKnownSentinel(),
+/// which is only appropriate for assertions, \a ilist_node::isSentinel() is
+/// appropriate for real logic.
+///
+/// Here are examples of \p Options usage:
+/// \li \c simple_ilist<T> gives the defaults.
+/// \li \c simple_ilist<T,ilist_sentinel_tracking<true>> enables the \a
+/// ilist_node::isSentinel() API.
+/// \li \c simple_ilist<T,ilist_tag<A>,ilist_sentinel_tracking<false>>
+/// specifies a tag of A and that tracking should be off (even when
+/// LLVM_ENABLE_ABI_BREAKING_CHECKS is enabled).
+/// \li \c simple_ilist<T,ilist_sentinel_tracking<false>,ilist_tag<A>> is
+/// equivalent to the last.
+///
+/// See \a is_valid_option for steps on adding a new option.
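+///
+/// For illustration only, a minimal hypothetical usage sketch:
+///
+/// \code
+///   struct MyNode : ilist_node<MyNode> {
+///     int Value;
+///     explicit MyNode(int V) : Value(V) {}
+///   };
+///
+///   MyNode A(1), B(2);          // The list never owns its nodes.
+///   simple_ilist<MyNode> L;
+///   L.push_back(A);             // Links A in by reference; no copy is made.
+///   L.push_back(B);
+///   L.remove(A);                // Unlinks A; the node itself is untouched.
+/// \endcode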
+template <typename T, class... Options>
+class simple_ilist
+    : ilist_detail::compute_node_options<T, Options...>::type::list_base_type,
+      ilist_detail::SpecificNodeAccess<
+          typename ilist_detail::compute_node_options<T, Options...>::type> {
+  static_assert(ilist_detail::check_options<Options...>::value,
+                "Unrecognized node option!");
+  using OptionsT =
+      typename ilist_detail::compute_node_options<T, Options...>::type;
+  using list_base_type = typename OptionsT::list_base_type;
+  ilist_sentinel<OptionsT> Sentinel;
+
+public:
+  using value_type = typename OptionsT::value_type;
+  using pointer = typename OptionsT::pointer;
+  using reference = typename OptionsT::reference;
+  using const_pointer = typename OptionsT::const_pointer;
+  using const_reference = typename OptionsT::const_reference;
+  using iterator = ilist_iterator<OptionsT, false, false>;
+  using const_iterator = ilist_iterator<OptionsT, false, true>;
+  using reverse_iterator = ilist_iterator<OptionsT, true, false>;
+  using const_reverse_iterator = ilist_iterator<OptionsT, true, true>;
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+
+  simple_ilist() = default;
+  ~simple_ilist() = default;
+
+  // No copy constructors.
+  simple_ilist(const simple_ilist &) = delete;
+  simple_ilist &operator=(const simple_ilist &) = delete;
+
+  // Move constructors.
+  simple_ilist(simple_ilist &&X) { splice(end(), X); }
+  simple_ilist &operator=(simple_ilist &&X) {
+    clear();
+    splice(end(), X);
+    return *this;
+  }
+
+  iterator begin() { return ++iterator(Sentinel); }
+  const_iterator begin() const { return ++const_iterator(Sentinel); }
+  iterator end() { return iterator(Sentinel); }
+  const_iterator end() const { return const_iterator(Sentinel); }
+  reverse_iterator rbegin() { return ++reverse_iterator(Sentinel); }
+  const_reverse_iterator rbegin() const {
+    return ++const_reverse_iterator(Sentinel);
+  }
+  reverse_iterator rend() { return reverse_iterator(Sentinel); }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(Sentinel);
+  }
+
+  /// Check if the list is empty in constant time.
+  LLVM_NODISCARD bool empty() const { return Sentinel.empty(); }
+
+  /// Calculate the size of the list in linear time.
+  LLVM_NODISCARD size_type size() const {
+    return std::distance(begin(), end());
+  }
+
+  reference front() { return *begin(); }
+  const_reference front() const { return *begin(); }
+  reference back() { return *rbegin(); }
+  const_reference back() const { return *rbegin(); }
+
+  /// Insert a node at the front; never copies.
+  void push_front(reference Node) { insert(begin(), Node); }
+
+  /// Insert a node at the back; never copies.
+  void push_back(reference Node) { insert(end(), Node); }
+
+  /// Remove the node at the front; never deletes.
+  void pop_front() { erase(begin()); }
+
+  /// Remove the node at the back; never deletes.
+  void pop_back() { erase(--end()); }
+
+  /// Swap with another list in place using std::swap.
+  void swap(simple_ilist &X) { std::swap(*this, X); }
+
+  /// Insert a node by reference; never copies.
+  iterator insert(iterator I, reference Node) {
+    list_base_type::insertBefore(*I.getNodePtr(), *this->getNodePtr(&Node));
+    return iterator(&Node);
+  }
+
+  /// Insert a range of nodes; never copies.
+  template <class Iterator>
+  void insert(iterator I, Iterator First, Iterator Last) {
+    for (; First != Last; ++First)
+      insert(I, *First);
+  }
+
+  /// Clone another list.
+  template <class Cloner, class Disposer>
+  void cloneFrom(const simple_ilist &L2, Cloner clone, Disposer dispose) {
+    clearAndDispose(dispose);
+    for (const_reference V : L2)
+      push_back(*clone(V));
+  }
+
+  /// Remove a node by reference; never deletes.
+  ///
+  /// \see \a erase() for removing by iterator.
+  /// \see \a removeAndDispose() if the node should be deleted.
+  void remove(reference N) { list_base_type::remove(*this->getNodePtr(&N)); }
+
+  /// Remove a node by reference and dispose of it.
+  template <class Disposer>
+  void removeAndDispose(reference N, Disposer dispose) {
+    remove(N);
+    dispose(&N);
+  }
+
+  /// Remove a node by iterator; never deletes.
+  ///
+  /// \see \a remove() for removing by reference.
+  /// \see \a eraseAndDispose() if the node should be deleted.
+  iterator erase(iterator I) {
+    assert(I != end() && "Cannot remove end of list!");
+    remove(*I++);
+    return I;
+  }
+
+  /// Remove a range of nodes; never deletes.
+  ///
+  /// \see \a eraseAndDispose() if the nodes should be deleted.
+  iterator erase(iterator First, iterator Last) {
+    list_base_type::removeRange(*First.getNodePtr(), *Last.getNodePtr());
+    return Last;
+  }
+
+  /// Remove a node by iterator and dispose of it.
+  template <class Disposer>
+  iterator eraseAndDispose(iterator I, Disposer dispose) {
+    auto Next = std::next(I);
+    erase(I);
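+    // I still points at the unlinked node: erase() only unlinks, never
+    // destroys, so the node can safely be handed to the disposer.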
+    dispose(&*I);
+    return Next;
+  }
+
+  /// Remove a range of nodes and dispose of them.
+  template <class Disposer>
+  iterator eraseAndDispose(iterator First, iterator Last, Disposer dispose) {
+    while (First != Last)
+      First = eraseAndDispose(First, dispose);
+    return Last;
+  }
+
+  /// Clear the list; never deletes.
+  ///
+  /// \see \a clearAndDispose() if the nodes should be deleted.
+  void clear() { Sentinel.reset(); }
+
+  /// Clear the list and dispose of the nodes.
+  template <class Disposer> void clearAndDispose(Disposer dispose) {
+    eraseAndDispose(begin(), end(), dispose);
+  }
+
+  /// Splice in another list.
+  void splice(iterator I, simple_ilist &L2) {
+    splice(I, L2, L2.begin(), L2.end());
+  }
+
+  /// Splice in a node from another list.
+  void splice(iterator I, simple_ilist &L2, iterator Node) {
+    splice(I, L2, Node, std::next(Node));
+  }
+
+  /// Splice in a range of nodes from another list.
+  void splice(iterator I, simple_ilist &, iterator First, iterator Last) {
+    list_base_type::transferBefore(*I.getNodePtr(), *First.getNodePtr(),
+                                   *Last.getNodePtr());
+  }
+
+  /// Merge in another list.
+  ///
+  /// \pre \c this and \p RHS are sorted.
+  ///@{
+  void merge(simple_ilist &RHS) { merge(RHS, std::less<T>()); }
+  template <class Compare> void merge(simple_ilist &RHS, Compare comp);
+  ///@}
+
+  /// Sort the list.
+  ///@{
+  void sort() { sort(std::less<T>()); }
+  template <class Compare> void sort(Compare comp);
+  ///@}
+};
+
+template <class T, class... Options>
+template <class Compare>
+void simple_ilist<T, Options...>::merge(simple_ilist &RHS, Compare comp) {
+  if (this == &RHS || RHS.empty())
+    return;
+  iterator LI = begin(), LE = end();
+  iterator RI = RHS.begin(), RE = RHS.end();
+  while (LI != LE) {
+    if (comp(*RI, *LI)) {
+      // Transfer a run of at least size 1 from RHS to LHS.
+      iterator RunStart = RI++;
+      RI = std::find_if(RI, RE, [&](reference RV) { return !comp(RV, *LI); });
+      splice(LI, RHS, RunStart, RI);
+      if (RI == RE)
+        return;
+    }
+    ++LI;
+  }
+  // Transfer the remaining RHS nodes once LHS is finished.
+  splice(LE, RHS, RI, RE);
+}
+
+template <class T, class... Options>
+template <class Compare>
+void simple_ilist<T, Options...>::sort(Compare comp) {
+  // Vacuously sorted.
+  if (empty() || std::next(begin()) == end())
+    return;
+
+  // Split the list in the middle.
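+  // Advance End two steps for each step of Center so that Center lands on the
+  // midpoint when End reaches the end (the slow/fast pointer technique).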
+  iterator Center = begin(), End = begin();
+  while (End != end() && ++End != end()) {
+    ++Center;
+    ++End;
+  }
+  simple_ilist RHS;
+  RHS.splice(RHS.end(), *this, Center, end());
+
+  // Sort the sublists and merge back together.
+  sort(comp);
+  RHS.sort(comp);
+  merge(RHS, comp);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SIMPLE_ILIST_H